Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/Makefile | 2
-rw-r--r--  drivers/net/wireless/ath/Kconfig | 1
-rw-r--r--  drivers/net/wireless/ath/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/ath.h | 13
-rw-r--r--  drivers/net/wireless/ath/ath10k/Kconfig | 39
-rw-r--r--  drivers/net/wireless/ath/ath10k/Makefile | 20
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.c | 295
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.h | 224
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 1189
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h | 516
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 665
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 369
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 503
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.h | 90
-rw-r--r--  drivers/net/wireless/ath/ath10k/hif.h | 137
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 1000
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.h | 368
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.c | 152
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h | 1338
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 1167
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 510
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 304
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 3069
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.h | 61
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 2507
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h | 355
-rw-r--r--  drivers/net/wireless/ath/ath10k/rx_desc.h | 990
-rw-r--r--  drivers/net/wireless/ath/ath10k/targaddrs.h | 449
-rw-r--r--  drivers/net/wireless/ath/ath10k/trace.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath10k/trace.h | 170
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 417
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.h | 39
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 2081
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 3052
-rw-r--r--  drivers/net/wireless/ath/ath5k/ahb.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 79
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.h | 14
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 51
-rw-r--r--  drivers/net/wireless/ath/ath6kl/debug.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath6kl/init.c | 14
-rw-r--r--  drivers/net/wireless/ath/ath6kl/sdio.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath6kl/usb.c | 36
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/ahb.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.c | 89
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.h | 23
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_hw.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_initvals.h | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 33
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c | 80
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_paprd.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c | 107
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h | 345
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h | 1774
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 31
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 430
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.h | 59
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs_debug.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 21
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 24
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_beacon.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_debug.c | 99
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 81
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 28
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 66
-rw-r--r--  drivers/net/wireless/ath/ath9k/link.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 55
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 101
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/wow.c | 168
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 377
-rw-r--r--  drivers/net/wireless/ath/carl9170/carl9170.h | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 182
-rw-r--r--  drivers/net/wireless/ath/regd.c | 6
-rw-r--r--  drivers/net/wireless/ath/wil6210/Kconfig | 12
-rw-r--r--  drivers/net/wireless/ath/wil6210/Makefile | 21
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 38
-rw-r--r--  drivers/net/wireless/ath/wil6210/debug.c | 69
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 12
-rw-r--r--  drivers/net/wireless/ath/wil6210/interrupt.c | 29
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 64
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 54
-rw-r--r--  drivers/net/wireless/ath/wil6210/trace.c | 20
-rw-r--r--  drivers/net/wireless/ath/wil6210/trace.h | 235
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 205
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.h | 36
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 28
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 60
-rw-r--r--  drivers/net/wireless/b43/Kconfig | 12
-rw-r--r--  drivers/net/wireless/b43/main.c | 12
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 302
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | 117
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd.h | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c | 18
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h | 6
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 54
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 176
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fweh.h | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 951
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h | 20
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h | 21
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/usb.c | 8
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 23
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/ampdu.c | 2
-rw-r--r--  drivers/net/wireless/cw1200/Kconfig | 30
-rw-r--r--  drivers/net/wireless/cw1200/Makefile | 21
-rw-r--r--  drivers/net/wireless/cw1200/bh.c | 619
-rw-r--r--  drivers/net/wireless/cw1200/bh.h | 28
-rw-r--r--  drivers/net/wireless/cw1200/cw1200.h | 323
-rw-r--r--  drivers/net/wireless/cw1200/cw1200_sdio.c | 425
-rw-r--r--  drivers/net/wireless/cw1200/cw1200_spi.c | 471
-rw-r--r--  drivers/net/wireless/cw1200/debug.c | 428
-rw-r--r--  drivers/net/wireless/cw1200/debug.h | 93
-rw-r--r--  drivers/net/wireless/cw1200/fwio.c | 520
-rw-r--r--  drivers/net/wireless/cw1200/fwio.h | 39
-rw-r--r--  drivers/net/wireless/cw1200/hwbus.h | 33
-rw-r--r--  drivers/net/wireless/cw1200/hwio.c | 312
-rw-r--r--  drivers/net/wireless/cw1200/hwio.h | 247
-rw-r--r--  drivers/net/wireless/cw1200/main.c | 605
-rw-r--r--  drivers/net/wireless/cw1200/pm.c | 367
-rw-r--r--  drivers/net/wireless/cw1200/pm.h | 43
-rw-r--r--  drivers/net/wireless/cw1200/queue.c | 583
-rw-r--r--  drivers/net/wireless/cw1200/queue.h | 116
-rw-r--r--  drivers/net/wireless/cw1200/scan.c | 461
-rw-r--r--  drivers/net/wireless/cw1200/scan.h | 56
-rw-r--r--  drivers/net/wireless/cw1200/sta.c | 2403
-rw-r--r--  drivers/net/wireless/cw1200/sta.h | 123
-rw-r--r--  drivers/net/wireless/cw1200/txrx.c | 1473
-rw-r--r--  drivers/net/wireless/cw1200/txrx.h | 106
-rw-r--r--  drivers/net/wireless/cw1200/wsm.c | 1822
-rw-r--r--  drivers/net/wireless/cw1200/wsm.h | 1870
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 2
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 1
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw_rx.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-mac.c | 5
-rw-r--r--  drivers/net/wireless/iwlegacy/3945.c | 18
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 25
-rw-r--r--  drivers/net/wireless/iwlegacy/commands.h | 8
-rw-r--r--  drivers/net/wireless/iwlegacy/common.c | 11
-rw-r--r--  drivers/net/wireless/iwlegacy/common.h | 41
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/Makefile | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/agn.h | 58
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/calib.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/commands.h | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h | 73
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/devices.c | 107
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/lib.c | 26
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 69
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/power.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rs.c | 51
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rx.c | 42
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/scan.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/testmode.c | 471
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tt.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/ucode.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 39
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 32
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 65
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-config.h | 54
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw.h | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-modparams.h | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-phy-db.c | 39
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-test.c | 852
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-test.h | 161
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-testmode.h | 309
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/Makefile | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/bt-coex.c | 29
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 212
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs.c | 459
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 98
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 233
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 97
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 364
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 205
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/nvm.c | 212
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c | 76
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/power.c | 218
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/quota.c | 25
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 171
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.h | 17
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c | 22
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 25
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 58
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tt.c | 530
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 41
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 123
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 73
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 40
-rw-r--r--  drivers/net/wireless/libertas/mesh.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/11h.c | 101
-rw-r--r--  drivers/net/wireless/mwifiex/Kconfig | 4
-rw-r--r--  drivers/net/wireless/mwifiex/Makefile | 1
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 126
-rw-r--r--  drivers/net/wireless/mwifiex/cfp.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 1
-rw-r--r--  drivers/net/wireless/mwifiex/fw.h | 33
-rw-r--r--  drivers/net/wireless/mwifiex/init.c | 125
-rw-r--r--  drivers/net/wireless/mwifiex/join.c | 13
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 114
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 33
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 60
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 548
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.h | 341
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmd.c | 62
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmdresp.c | 17
-rw-r--r--  drivers/net/wireless/mwifiex/sta_event.c | 11
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 56
-rw-r--r--  drivers/net/wireless/mwifiex/uap_cmd.c | 21
-rw-r--r--  drivers/net/wireless/mwifiex/uap_event.c | 25
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.c | 5
-rw-r--r--  drivers/net/wireless/mwl8k.c | 11
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_pci.h | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_usb.c | 3
-rw-r--r--  drivers/net/wireless/p54/p54spi.c | 37
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 66
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 66
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 66
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800.h | 12
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 835
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 68
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 127
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 9
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 54
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c | 9
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 50
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.h | 21
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 60
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 58
-rw-r--r--  drivers/net/wireless/rtlwifi/Kconfig | 72
-rw-r--r--  drivers/net/wireless/rtlwifi/Makefile | 10
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 21
-rw-r--r--  drivers/net/wireless/rtlwifi/base.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/debug.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 22
-rw-r--r--  drivers/net/wireless/rtlwifi/ps.c | 16
-rw-r--r--  drivers/net/wireless/rtlwifi/ps.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/rf.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/dm.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/sw.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 9
-rw-r--r--  drivers/net/wireless/ti/wl1251/spi.c | 30
-rw-r--r--  drivers/net/wireless/ti/wl18xx/main.c | 47
-rw-r--r--  drivers/net/wireless/ti/wl18xx/reg.h | 15
-rw-r--r--  drivers/net/wireless/ti/wlcore/Makefile | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 284
-rw-r--r--  drivers/net/wireless/ti/wlcore/ps.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/spi.c | 14
-rw-r--r--  drivers/net/wireless/ti/wlcore/sysfs.c | 216
-rw-r--r--  drivers/net/wireless/ti/wlcore/sysfs.h | 28
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.c | 2
296 files changed, 46918 insertions, 6420 deletions
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f8f0156dff4e..200020eb3005 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -280,5 +280,6 @@ source "drivers/net/wireless/rtlwifi/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
source "drivers/net/wireless/mwifiex/Kconfig"
+source "drivers/net/wireless/cw1200/Kconfig"
endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 67156efe14c4..0fab227025be 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -57,3 +57,5 @@ obj-$(CONFIG_MWIFIEX) += mwifiex/
obj-$(CONFIG_BRCMFMAC) += brcm80211/
obj-$(CONFIG_BRCMSMAC) += brcm80211/
+
+obj-$(CONFIG_CW1200) += cw1200/
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 2c02b4e84094..1abf1d421173 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -31,5 +31,6 @@ source "drivers/net/wireless/ath/carl9170/Kconfig"
source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
+source "drivers/net/wireless/ath/ath10k/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 97b964ded2be..fb05cfd19361 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_CARL9170) += carl9170/
obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
+obj-$(CONFIG_ATH10K) += ath10k/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 4521342c62cc..daeafeff186b 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -239,13 +239,12 @@ enum ATH_DEBUG {
ATH_DBG_CONFIG = 0x00000200,
ATH_DBG_FATAL = 0x00000400,
ATH_DBG_PS = 0x00000800,
- ATH_DBG_HWTIMER = 0x00001000,
- ATH_DBG_BTCOEX = 0x00002000,
- ATH_DBG_WMI = 0x00004000,
- ATH_DBG_BSTUCK = 0x00008000,
- ATH_DBG_MCI = 0x00010000,
- ATH_DBG_DFS = 0x00020000,
- ATH_DBG_WOW = 0x00040000,
+ ATH_DBG_BTCOEX = 0x00001000,
+ ATH_DBG_WMI = 0x00002000,
+ ATH_DBG_BSTUCK = 0x00004000,
+ ATH_DBG_MCI = 0x00008000,
+ ATH_DBG_DFS = 0x00010000,
+ ATH_DBG_WOW = 0x00020000,
ATH_DBG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
new file mode 100644
index 000000000000..82e8088ca9b4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -0,0 +1,39 @@
+config ATH10K
+ tristate "Atheros 802.11ac wireless cards support"
+ depends on MAC80211 && HAS_DMA
+ select ATH_COMMON
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros IEEE 802.11ac family of chipsets.
+
+ If you choose to build a module, it'll be called ath10k.
+
+config ATH10K_PCI
+ tristate "Atheros ath10k PCI support"
+ depends on ATH10K && PCI
+ ---help---
+	  This module adds support for the PCIe bus.
+
+config ATH10K_DEBUG
+ bool "Atheros ath10k debugging"
+ depends on ATH10K
+ ---help---
+ Enables debug support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_DEBUGFS
+ bool "Atheros ath10k debugfs support"
+ depends on ATH10K
+ ---help---
+	  Enables debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_TRACING
+ bool "Atheros ath10k tracing support"
+ depends on ATH10K
+ depends on EVENT_TRACING
+ ---help---
+	  Select this to use the ath10k tracing infrastructure.
+
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
new file mode 100644
index 000000000000..a4179f49ee1f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -0,0 +1,20 @@
+obj-$(CONFIG_ATH10K) += ath10k_core.o
+ath10k_core-y += mac.o \
+ debug.o \
+ core.o \
+ htc.o \
+ htt.o \
+ htt_rx.o \
+ htt_tx.o \
+ txrx.o \
+ wmi.o \
+ bmi.o
+
+ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
+
+obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
+ath10k_pci-y += pci.o \
+ ce.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
new file mode 100644
index 000000000000..1a2ef51b69d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "bmi.h"
+#include "hif.h"
+#include "debug.h"
+#include "htc.h"
+
+int ath10k_bmi_done(struct ath10k *ar)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+ return 0;
+ }
+
+ ar->bmi.done_sent = true;
+ cmd.id = __cpu_to_le32(BMI_DONE);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn("unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
+ return 0;
+}
+
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+ u32 resplen = sizeof(resp.get_target_info);
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn("BMI Get Target Info Command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn("unable to get target info from device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.get_target_info)) {
+ ath10k_warn("invalid get_target_info response length (%d)\n",
+ resplen);
+ return -EIO;
+ }
+
+ target_info->version = __le32_to_cpu(resp.get_target_info.version);
+ target_info->type = __le32_to_cpu(resp.get_target_info.type);
+ return 0;
+}
+
+int ath10k_bmi_read_memory(struct ath10k *ar,
+ u32 address, void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
+ u32 rxlen;
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn("command disallowed\n");
+ return -EBUSY;
+ }
+
+ ath10k_dbg(ATH10K_DBG_CORE,
+ "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
+ __func__, ar, address, length);
+
+ while (length) {
+ rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
+
+ cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
+ cmd.read_mem.addr = __cpu_to_le32(address);
+ cmd.read_mem.len = __cpu_to_le32(rxlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
+ &resp, &rxlen);
+ if (ret) {
+ ath10k_warn("unable to read from the device\n");
+ return ret;
+ }
+
+ memcpy(buffer, resp.read_mem.payload, rxlen);
+ address += rxlen;
+ buffer += rxlen;
+ length -= rxlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_write_memory(struct ath10k *ar,
+ u32 address, const void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
+ u32 txlen;
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn("command disallowed\n");
+ return -EBUSY;
+ }
+
+ ath10k_dbg(ATH10K_DBG_CORE,
+ "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
+ __func__, ar, address, length);
+
+ while (length) {
+ txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+		/* copy before roundup to avoid reading beyond buffer */
+ memcpy(cmd.write_mem.payload, buffer, txlen);
+ txlen = roundup(txlen, 4);
+
+ cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
+ cmd.write_mem.addr = __cpu_to_le32(address);
+ cmd.write_mem.len = __cpu_to_le32(txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn("unable to write to the device\n");
+ return ret;
+ }
+
+ /* fixup roundup() so `length` zeroes out for last chunk */
+ txlen = min(txlen, length);
+
+ address += txlen;
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
+ u32 resplen = sizeof(resp.execute);
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn("command disallowed\n");
+ return -EBUSY;
+ }
+
+ ath10k_dbg(ATH10K_DBG_CORE,
+ "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
+ __func__, ar, address, *param);
+
+ cmd.id = __cpu_to_le32(BMI_EXECUTE);
+ cmd.execute.addr = __cpu_to_le32(address);
+ cmd.execute.param = __cpu_to_le32(*param);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn("unable to read from the device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.execute)) {
+ ath10k_warn("invalid execute response length (%d)\n",
+ resplen);
+		return -EIO;
+ }
+
+ *param = __le32_to_cpu(resp.execute.result);
+ return 0;
+}
+
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
+ u32 txlen;
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn("command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+ WARN_ON_ONCE(txlen & 3);
+
+ cmd.id = __cpu_to_le32(BMI_LZ_DATA);
+ cmd.lz_data.len = __cpu_to_le32(txlen);
+ memcpy(cmd.lz_data.payload, buffer, txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn("unable to write to the device\n");
+ return ret;
+ }
+
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn("command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
+ cmd.lz_start.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn("unable to Start LZ Stream to the device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_fast_download(struct ath10k *ar,
+ u32 address, const void *buffer, u32 length)
+{
+ u8 trailer[4] = {};
+ u32 head_len = rounddown(length, 4);
+ u32 trailer_len = length - head_len;
+ int ret;
+
+ ret = ath10k_bmi_lz_stream_start(ar, address);
+ if (ret)
+ return ret;
+
+ /* copy the last word into a zero padded buffer */
+ if (trailer_len > 0)
+ memcpy(trailer, buffer + head_len, trailer_len);
+
+ ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+ if (ret)
+ return ret;
+
+ if (trailer_len > 0)
+ ret = ath10k_bmi_lz_data(ar, trailer, 4);
+
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Close compressed stream and open a new (fake) one.
+ * This serves mainly to flush Target caches.
+ */
+ ret = ath10k_bmi_lz_stream_start(ar, 0x00);
+
+ return ret;
+}
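
Taken together, the routines above give the host everything it needs to bootstrap a target over BMI: query the chip, stream an image, close the interface. A minimal usage sketch follows (illustrative only, not part of the patch); the helper name and the fw_load_addr/fw_data/fw_len parameters are hypothetical stand-ins for values the core code obtains elsewhere, e.g. from request_firmware().

/* illustrative sketch -- assumes `ar` is an initialised struct ath10k */
static int example_bmi_boot(struct ath10k *ar, u32 fw_load_addr,
			    const void *fw_data, u32 fw_len)
{
	struct bmi_target_info target_info;
	int ret;

	/* query chip version/type while BMI is still open */
	ret = ath10k_bmi_get_target_info(ar, &target_info);
	if (ret)
		return ret;

	/* stream the image to the target in BMI_MAX_DATA_SIZE chunks */
	ret = ath10k_bmi_fast_download(ar, fw_load_addr, fw_data, fw_len);
	if (ret)
		return ret;

	/* close BMI; no further BMI commands are accepted after this */
	return ath10k_bmi_done(ar);
}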
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
new file mode 100644
index 000000000000..32c56aa33a5e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BMI_H_
+#define _BMI_H_
+
+#include "core.h"
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to QCA988x, to provide
+ * patches to code that is already resident on QCA988x, and generally
+ * to examine and modify state. The Host has an opportunity to use
+ * BMI only once during bootup. Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0. BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using AR8K Counter #4. As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
+
+/* Maximum data size used for BMI transfers */
+#define BMI_MAX_DATA_SIZE 256
+
+/* len = cmd + addr + length */
+#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
+ sizeof(u32) + \
+ sizeof(u32) + \
+ sizeof(u32))
+
+/* BMI Commands */
+
+enum bmi_cmd_id {
+ BMI_NO_COMMAND = 0,
+ BMI_DONE = 1,
+ BMI_READ_MEMORY = 2,
+ BMI_WRITE_MEMORY = 3,
+ BMI_EXECUTE = 4,
+ BMI_SET_APP_START = 5,
+ BMI_READ_SOC_REGISTER = 6,
+ BMI_READ_SOC_WORD = 6,
+ BMI_WRITE_SOC_REGISTER = 7,
+ BMI_WRITE_SOC_WORD = 7,
+ BMI_GET_TARGET_ID = 8,
+ BMI_GET_TARGET_INFO = 8,
+ BMI_ROMPATCH_INSTALL = 9,
+ BMI_ROMPATCH_UNINSTALL = 10,
+ BMI_ROMPATCH_ACTIVATE = 11,
+ BMI_ROMPATCH_DEACTIVATE = 12,
+ BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */
+ BMI_LZ_DATA = 14,
+ BMI_NVRAM_PROCESS = 15,
+};
+
+#define BMI_NVRAM_SEG_NAME_SZ 16
+
+struct bmi_cmd {
+ __le32 id; /* enum bmi_cmd_id */
+ union {
+ struct {
+ } done;
+ struct {
+ __le32 addr;
+ __le32 len;
+ } read_mem;
+ struct {
+ __le32 addr;
+ __le32 len;
+ u8 payload[0];
+ } write_mem;
+ struct {
+ __le32 addr;
+ __le32 param;
+ } execute;
+ struct {
+ __le32 addr;
+ } set_app_start;
+ struct {
+ __le32 addr;
+ } read_soc_reg;
+ struct {
+ __le32 addr;
+ __le32 value;
+ } write_soc_reg;
+ struct {
+ } get_target_info;
+ struct {
+ __le32 rom_addr;
+ __le32 ram_addr; /* or value */
+ __le32 size;
+			__le32 activate; /* 0=install, but don't activate */
+ } rompatch_install;
+ struct {
+ __le32 patch_id;
+ } rompatch_uninstall;
+ struct {
+ __le32 count;
+ __le32 patch_ids[0]; /* length of @count */
+ } rompatch_activate;
+ struct {
+ __le32 count;
+ __le32 patch_ids[0]; /* length of @count */
+ } rompatch_deactivate;
+ struct {
+ __le32 addr;
+ } lz_start;
+ struct {
+ __le32 len; /* max BMI_MAX_DATA_SIZE */
+ u8 payload[0]; /* length of @len */
+ } lz_data;
+ struct {
+ u8 name[BMI_NVRAM_SEG_NAME_SZ];
+ } nvram_process;
+ u8 payload[BMI_MAX_CMDBUF_SIZE];
+ };
+} __packed;
+
+union bmi_resp {
+ struct {
+ u8 payload[0];
+ } read_mem;
+ struct {
+ __le32 result;
+ } execute;
+ struct {
+ __le32 value;
+ } read_soc_reg;
+ struct {
+ __le32 len;
+ __le32 version;
+ __le32 type;
+ } get_target_info;
+ struct {
+ __le32 patch_id;
+ } rompatch_install;
+ struct {
+ __le32 patch_id;
+ } rompatch_uninstall;
+ struct {
+ /* 0 = nothing executed
+ * otherwise = NVRAM segment return value */
+ __le32 result;
+ } nvram_process;
+ u8 payload[BMI_MAX_CMDBUF_SIZE];
+} __packed;
+
+struct bmi_target_info {
+ u32 version;
+ u32 type;
+};
+
+
+/* timeout in jiffies (1 second) */
+#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+
+#define BMI_CE_NUM_TO_TARG 0
+#define BMI_CE_NUM_TO_HOST 1
+
+int ath10k_bmi_done(struct ath10k *ar);
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info);
+int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
+ void *buffer, u32 length);
+int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
+ const void *buffer, u32 length);
+
+#define ath10k_bmi_read32(ar, item, val) \
+ ({ \
+ int ret; \
+ u32 addr; \
+ __le32 tmp; \
+ \
+ addr = host_interest_item_address(HI_ITEM(item)); \
+ ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
+ *val = __le32_to_cpu(tmp); \
+ ret; \
+ })
+
+#define ath10k_bmi_write32(ar, item, val) \
+ ({ \
+ int ret; \
+ u32 address; \
+ __le32 v = __cpu_to_le32(val); \
+ \
+ address = host_interest_item_address(HI_ITEM(item)); \
+ ret = ath10k_bmi_write_memory(ar, address, \
+ (u8 *)&v, sizeof(v)); \
+ ret; \
+ })
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
+ const void *buffer, u32 length);
+#endif /* _BMI_H_ */
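
The ath10k_bmi_read32()/ath10k_bmi_write32() macros above resolve a named "host interest" item to a target address (via HI_ITEM() and host_interest_item_address()) and move one little-endian word. A short usage sketch, illustrative only: it assumes hi_board_data and hi_board_data_initialized are fields of the host_interest structure from targaddrs.h and that `ar` is an initialised struct ath10k.

	u32 board_data_addr;
	int ret;

	/* where does the target expect board data to be placed? */
	ret = ath10k_bmi_read32(ar, hi_board_data, &board_data_addr);
	if (ret == 0)
		/* ... write the board data there, then flag it as present */
		ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);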
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
new file mode 100644
index 000000000000..61a8ac70d3ca
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -0,0 +1,1189 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hif.h"
+#include "pci.h"
+#include "ce.h"
+#include "debug.h"
+
+/*
+ * Support for Copy Engine hardware, which is mainly used for
+ * communication between Host and Target over a PCIe interconnect.
+ */
+
+/*
+ * A single CopyEngine (CE) comprises two "rings":
+ * a source ring
+ * a destination ring
+ *
+ * Each ring consists of a number of descriptors which specify
+ * an address, length, and meta-data.
+ *
+ * Typically, one side of the PCIe interconnect (Host or Target)
+ * controls one ring and the other side controls the other ring.
+ * The source side chooses when to initiate a transfer and it
+ * chooses what to send (buffer address, length). The destination
+ * side keeps a supply of "anonymous receive buffers" available and
+ * it handles incoming data as it arrives (when the destination
+ * receives an interrupt).
+ *
+ * The sender may send a simple buffer (address/length) or it may
+ * send a small list of buffers. When a small list is sent, hardware
+ * "gathers" these and they end up in a single destination buffer
+ * with a single interrupt.
+ *
+ * There are several "contexts" managed by this layer -- more, it
+ * may seem, than should be needed. These are provided mainly for
+ * maximum flexibility and especially to facilitate a simpler HIF
+ * implementation. There are per-CopyEngine recv, send, and watermark
+ * contexts. These are supplied by the caller when a recv, send,
+ * or watermark handler is established and they are echoed back to
+ * the caller when the respective callbacks are invoked. There is
+ * also a per-transfer context supplied by the caller when a buffer
+ * (or sendlist) is sent and when a buffer is enqueued for recv.
+ * These per-transfer contexts are echoed back to the caller when
+ * the buffer is sent/received.
+ */
+
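
(A rough usage sketch, not part of the patch: the functions it calls are the ones defined below in this file, while my_send_done/my_pipe_send and their arguments are hypothetical HIF-side names.)

static void my_send_done(struct ce_state *ce_state, void *transfer_context,
			 u32 buffer, unsigned int nbytes,
			 unsigned int transfer_id)
{
	/* the per-transfer context handed to ath10k_ce_send() is echoed
	 * back here once the source descriptor completes */
}

static int my_pipe_send(struct ce_state *ce_state, void *ctx,
			u32 dma_addr, unsigned int len)
{
	/* route send completions to my_send_done, interrupts enabled */
	ath10k_ce_send_cb_register(ce_state, my_send_done, 0);

	/* post one source descriptor: transfer_id 0, no flags */
	return ath10k_ce_send(ce_state, ctx, dma_addr, len, 0, 0);
}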
+static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
+}
+
+static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ void __iomem *indicator_addr;
+
+ if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+ return;
+ }
+
+ /* workaround for QCA988x_1.0 HW CE */
+ indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
+
+ if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
+ iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
+ } else {
+ unsigned long irq_flags;
+ local_irq_save(irq_flags);
+ iowrite32(1, indicator_addr);
+
+ /*
+ * PCIE write waits for ACK in IPQ8K, there is no
+ * need to read back value.
+ */
+ (void)ioread32(indicator_addr);
+ (void)ioread32(indicator_addr); /* conservative */
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+
+ iowrite32(0, indicator_addr);
+ local_irq_restore(irq_flags);
+ }
+}
+
+static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int addr)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 ctrl1_addr = ath10k_pci_read32((ar),
+ (ce_ctrl_addr) + CE_CTRL1_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+ (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
+ CE_CTRL1_DMAX_LENGTH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+ (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
+ CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+ (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
+ CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u32 addr)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+ (addr & ~SRC_WATERMARK_HIGH_MASK) |
+ SRC_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+ (addr & ~SRC_WATERMARK_LOW_MASK) |
+ SRC_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+ (addr & ~DST_WATERMARK_HIGH_MASK) |
+ DST_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+ (addr & ~DST_WATERMARK_LOW_MASK) |
+ DST_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 host_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + HOST_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+ host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 host_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + HOST_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+ host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 host_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + HOST_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+ host_ie_addr & ~CE_WATERMARK_MASK);
+}
+
+static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 misc_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + MISC_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+ misc_ie_addr | CE_ERROR_MASK);
+}
+
+static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int mask)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
+}
+
+
+/*
+ * Guts of ath10k_ce_send, used by both ath10k_ce_send and
+ * ath10k_ce_sendlist_send.
+ * The caller takes responsibility for any needed locking.
+ */
+static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ce_desc *desc, *sdesc;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int write_index = src_ring->write_index;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ u32 desc_flags = 0;
+ int ret = 0;
+
+ if (nbytes > ce_state->src_sz_max)
+		ath10k_warn("%s: sending more than we can (nbytes: %d, max: %d)\n",
+ __func__, nbytes, ce_state->src_sz_max);
+
+ ath10k_pci_wake(ar);
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) <= 0)) {
+ ret = -EIO;
+ goto exit;
+ }
+
+ desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+ write_index);
+ sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);
+
+ desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+ if (flags & CE_SEND_FLAG_GATHER)
+ desc_flags |= CE_DESC_FLAGS_GATHER;
+ if (flags & CE_SEND_FLAG_BYTE_SWAP)
+ desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+ sdesc->addr = __cpu_to_le32(buffer);
+ sdesc->nbytes = __cpu_to_le16(nbytes);
+ sdesc->flags = __cpu_to_le16(desc_flags);
+
+ *desc = *sdesc;
+
+ src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+ /* Update Source Ring Write Index */
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+ /* WORKAROUND */
+ if (!(flags & CE_SEND_FLAG_GATHER))
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+ src_ring->write_index = write_index;
+exit:
+ ath10k_pci_sleep(ar);
+ return ret;
+}
+
+int ath10k_ce_send(struct ce_state *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+ buffer, nbytes, transfer_id, flags);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
+ unsigned int nbytes, u32 flags)
+{
+ unsigned int num_items = sendlist->num_items;
+ struct ce_sendlist_item *item;
+
+ item = &sendlist->item[num_items];
+ item->data = buffer;
+ item->u.nbytes = nbytes;
+ item->flags = flags;
+ sendlist->num_items++;
+}
+
+int ath10k_ce_sendlist_send(struct ce_state *ce_state,
+ void *per_transfer_context,
+ struct ce_sendlist *sendlist,
+ unsigned int transfer_id)
+{
+ struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ce_sendlist_item *item;
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int num_items = sendlist->num_items;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int i, delta, ret = -ENOMEM;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
+ delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
+
+ if (delta >= num_items) {
+ /*
+ * Handle all but the last item uniformly.
+ */
+ for (i = 0; i < num_items - 1; i++) {
+ item = &sendlist->item[i];
+ ret = ath10k_ce_send_nolock(ce_state,
+ CE_SENDLIST_ITEM_CTXT,
+ (u32) item->data,
+ item->u.nbytes, transfer_id,
+ item->flags |
+ CE_SEND_FLAG_GATHER);
+ if (ret)
+ ath10k_warn("CE send failed for item: %d\n", i);
+ }
+ /*
+ * Provide valid context pointer for final item.
+ */
+ item = &sendlist->item[i];
+ ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+ (u32) item->data, item->u.nbytes,
+ transfer_id, item->flags);
+ if (ret)
+ ath10k_warn("CE send failed for last item: %d\n", i);
+ }
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+ void *per_recv_context,
+ u32 buffer)
+{
+ struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index;
+ unsigned int sw_index;
+ int ret;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ write_index = dest_ring->write_index;
+ sw_index = dest_ring->sw_index;
+
+ ath10k_pci_wake(ar);
+
+ if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+
+ /* Update destination descriptor */
+ desc->addr = __cpu_to_le32(buffer);
+ desc->nbytes = 0;
+
+ dest_ring->per_transfer_context[write_index] =
+ per_recv_context;
+
+ /* Update Destination Ring Write Index */
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+ ath10k_pci_sleep(ar);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+/*
+ * Guts of ath10k_ce_completed_recv_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp)
+{
+ struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int sw_index = dest_ring->sw_index;
+
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+ struct ce_desc sdesc;
+ u16 nbytes;
+
+ /* Copy in one go for performance reasons */
+ sdesc = *desc;
+
+ nbytes = __le16_to_cpu(sdesc.nbytes);
+ if (nbytes == 0) {
+ /*
+ * This closes a relatively unusual race where the Host
+ * sees the updated DRRI before the update to the
+ * corresponding descriptor has completed. We treat this
+ * as a descriptor that is not yet done.
+ */
+ return -EIO;
+ }
+
+ desc->nbytes = 0;
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le32_to_cpu(sdesc.addr);
+ *nbytesp = nbytes;
+ *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
+
+ if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
+ *flagsp = CE_RECV_FLAG_SWAPPED;
+ else
+ *flagsp = 0;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+ per_transfer_contextp,
+ bufferp, nbytesp,
+ transfer_idp, flagsp);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp)
+{
+ struct ce_ring_state *dest_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+
+ dest_ring = ce_state->dest_ring;
+
+ if (!dest_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ar_pci = ath10k_pci_priv(ar);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ nentries_mask = dest_ring->nentries_mask;
+ sw_index = dest_ring->sw_index;
+ write_index = dest_ring->write_index;
+ if (write_index != sw_index) {
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+/*
+ * Guts of ath10k_ce_completed_send_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
+{
+ struct ce_ring_state *src_ring = ce_state->src_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int read_index;
+ int ret = -EIO;
+
+ if (src_ring->hw_index == sw_index) {
+ /*
+ * The SW completion index has caught up with the cached
+ * version of the HW completion index.
+ * Update the cached HW completion index to see whether
+ * the SW has really caught up to the HW, or if the cached
+ * value of the HW index has become stale.
+ */
+ ath10k_pci_wake(ar);
+ src_ring->hw_index =
+ ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ ath10k_pci_sleep(ar);
+ }
+ read_index = src_ring->hw_index;
+
+ if ((read_index != sw_index) && (read_index != 0xffffffff)) {
+ struct ce_desc *sbase = src_ring->shadow_base;
+ struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(sdesc->addr);
+ *nbytesp = __le16_to_cpu(sdesc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+ CE_DESC_FLAGS_META_DATA);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* NB: Modeled after ath10k_ce_completed_send_next */
+int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
+{
+ struct ce_ring_state *src_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+
+ src_ring = ce_state->src_ring;
+
+ if (!src_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ar_pci = ath10k_pci_priv(ar);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ nentries_mask = src_ring->nentries_mask;
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
+ if (write_index != sw_index) {
+ struct ce_desc *base = src_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ ret = ath10k_ce_completed_send_next_nolock(ce_state,
+ per_transfer_contextp,
+ bufferp, nbytesp,
+ transfer_idp);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+/*
+ * Guts of interrupt handler for per-engine interrupts on a particular CE.
+ *
+ * Invokes registered callbacks for recv_complete,
+ * send_complete, and watermarks.
+ */
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ void *transfer_context;
+ u32 buf;
+ unsigned int nbytes;
+ unsigned int id;
+ unsigned int flags;
+
+ ath10k_pci_wake(ar);
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ /* Clear the copy-complete interrupts that will be handled here. */
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
+ HOST_IS_COPY_COMPLETE_MASK);
+
+ if (ce_state->recv_cb) {
+ /*
+ * Pop completed recv buffers and call the registered
+ * recv callback for each
+ */
+ while (ath10k_ce_completed_recv_next_nolock(ce_state,
+ &transfer_context,
+ &buf, &nbytes,
+ &id, &flags) == 0) {
+ spin_unlock_bh(&ar_pci->ce_lock);
+ ce_state->recv_cb(ce_state, transfer_context, buf,
+ nbytes, id, flags);
+ spin_lock_bh(&ar_pci->ce_lock);
+ }
+ }
+
+ if (ce_state->send_cb) {
+ /*
+ * Pop completed send buffers and call the registered
+ * send callback for each
+ */
+ while (ath10k_ce_completed_send_next_nolock(ce_state,
+ &transfer_context,
+ &buf,
+ &nbytes,
+ &id) == 0) {
+ spin_unlock_bh(&ar_pci->ce_lock);
+ ce_state->send_cb(ce_state, transfer_context,
+ buf, nbytes, id);
+ spin_lock_bh(&ar_pci->ce_lock);
+ }
+ }
+
+ /*
+ * Misc CE interrupts are not being handled, but still need
+ * to be cleared.
+ */
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+ ath10k_pci_sleep(ar);
+}
+
+/*
+ * Handler for per-engine interrupts on ALL active CEs.
+ * This is used in cases where the system is sharing a
+ * single interrupt for all CEs
+ */
+
+void ath10k_ce_per_engine_service_any(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ce_id;
+ u32 intr_summary;
+
+ ath10k_pci_wake(ar);
+ intr_summary = CE_INTERRUPT_SUMMARY(ar);
+
+ for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
+ if (intr_summary & (1 << ce_id))
+ intr_summary &= ~(1 << ce_id);
+ else
+ /* no intr pending on this CE */
+ continue;
+
+ ath10k_ce_per_engine_service(ar, ce_id);
+ }
+
+ ath10k_pci_sleep(ar);
+}
+
+/*
+ * Adjust interrupts for the copy complete handler.
+ * If it's needed for either send or recv, then unmask
+ * this interrupt; otherwise, mask it.
+ *
+ * Called with ce_lock held.
+ */
+static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+ int disable_copy_compl_intr)
+{
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+
+ ath10k_pci_wake(ar);
+
+ if ((!disable_copy_compl_intr) &&
+ (ce_state->send_cb || ce_state->recv_cb))
+ ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
+ else
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+
+ ath10k_pci_sleep(ar);
+}
+
+void ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ce_id;
+
+ ath10k_pci_wake(ar);
+ for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
+ struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ u32 ctrl_addr = ce_state->ctrl_addr;
+
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ }
+ ath10k_pci_sleep(ar);
+}
+
+void ath10k_ce_send_cb_register(struct ce_state *ce_state,
+ void (*send_cb) (struct ce_state *ce_state,
+ void *transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id),
+ int disable_interrupts)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ ce_state->send_cb = send_cb;
+ ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
+ spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
+ void (*recv_cb) (struct ce_state *ce_state,
+ void *transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ ce_state->recv_cb = recv_cb;
+ ath10k_ce_per_engine_handler_adjust(ce_state, 0);
+ spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+static int ath10k_ce_init_src_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ struct ce_state *ce_state,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ce_ring_state *src_ring;
+ unsigned int nentries = attr->src_nentries;
+ unsigned int ce_nbytes;
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ dma_addr_t base_addr;
+ char *ptr;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ if (ce_state->src_ring) {
+ WARN_ON(ce_state->src_ring->nentries != nentries);
+ return 0;
+ }
+
+ ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ptr = kzalloc(ce_nbytes, GFP_KERNEL);
+ if (ptr == NULL)
+ return -ENOMEM;
+
+ ce_state->src_ring = (struct ce_ring_state *)ptr;
+ src_ring = ce_state->src_ring;
+
+ ptr += sizeof(struct ce_ring_state);
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
+
+ ath10k_pci_wake(ar);
+ src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ src_ring->hw_index = src_ring->sw_index;
+
+ src_ring->write_index =
+ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+ ath10k_pci_sleep(ar);
+
+ src_ring->per_transfer_context = (void **)ptr;
+
+ /*
+ * Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ src_ring->base_addr_owner_space_unaligned =
+ pci_alloc_consistent(ar_pci->pdev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr);
+ src_ring->base_addr_ce_space_unaligned = base_addr;
+
+ src_ring->base_addr_owner_space = PTR_ALIGN(
+ src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space = ALIGN(
+ src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ /*
+ * Also allocate a shadow src ring in regular
+ * mem to use for faster access.
+ */
+ src_ring->shadow_base_unaligned =
+ kmalloc((nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN), GFP_KERNEL);
+
+ src_ring->shadow_base = PTR_ALIGN(
+ src_ring->shadow_base_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ ath10k_pci_wake(ar);
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ src_ring->base_addr_ce_space);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+ ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+ ath10k_pci_sleep(ar);
+
+ return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ struct ce_state *ce_state,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ce_ring_state *dest_ring;
+ unsigned int nentries = attr->dest_nentries;
+ unsigned int ce_nbytes;
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ dma_addr_t base_addr;
+ char *ptr;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ if (ce_state->dest_ring) {
+ WARN_ON(ce_state->dest_ring->nentries != nentries);
+ return 0;
+ }
+
+ ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ptr = kzalloc(ce_nbytes, GFP_KERNEL);
+ if (ptr == NULL)
+ return -ENOMEM;
+
+ ce_state->dest_ring = (struct ce_ring_state *)ptr;
+ dest_ring = ce_state->dest_ring;
+
+ ptr += sizeof(struct ce_ring_state);
+ dest_ring->nentries = nentries;
+ dest_ring->nentries_mask = nentries - 1;
+
+ ath10k_pci_wake(ar);
+ dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->write_index =
+ ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ ath10k_pci_sleep(ar);
+
+ dest_ring->per_transfer_context = (void **)ptr;
+
+ /*
+ * Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ dest_ring->base_addr_owner_space_unaligned =
+ pci_alloc_consistent(ar_pci->pdev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr);
+ dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+ /*
+ * Zero-initialize the descriptor memory so that stale data cannot
+ * crash the system during firmware download.
+ */
+ memset(dest_ring->base_addr_owner_space_unaligned, 0,
+ nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);
+
+ dest_ring->base_addr_owner_space = PTR_ALIGN(
+ dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space = ALIGN(
+ dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ ath10k_pci_wake(ar);
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ dest_ring->base_addr_ce_space);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+ ath10k_pci_sleep(ar);
+
+ return 0;
+}
+
+static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ce_state *ce_state = NULL;
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ if (!ar_pci->ce_id_to_state[ce_id]) {
+ ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
+ if (ce_state == NULL) {
+ spin_unlock_bh(&ar_pci->ce_lock);
+ return NULL;
+ }
+
+ ar_pci->ce_id_to_state[ce_id] = ce_state;
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ctrl_addr;
+ ce_state->state = CE_RUNNING;
+ /* Save attribute flags */
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+ }
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ce_state;
+}
+
+/*
+ * Initialize a Copy Engine based on caller-supplied attributes.
+ * This may be called once to initialize both source and destination
+ * rings or it may be called twice for separate source and destination
+ * initialization. It may be that only one side or the other is
+ * initialized by software/firmware.
+ */
+struct ce_state *ath10k_ce_init(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ce_state *ce_state;
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ ce_state = ath10k_ce_init_state(ar, ce_id, attr);
+ if (!ce_state) {
+ ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
+ return NULL;
+ }
+
+ if (attr->src_nentries) {
+ if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
+ ath10k_err("Failed to initialize CE src ring for ID: %d\n",
+ ce_id);
+ ath10k_ce_deinit(ce_state);
+ return NULL;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
+ ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
+ ce_id);
+ ath10k_ce_deinit(ce_state);
+ return NULL;
+ }
+ }
+
+ /* Enable CE error interrupts */
+ ath10k_pci_wake(ar);
+ ath10k_ce_error_intr_enable(ar, ctrl_addr);
+ ath10k_pci_sleep(ar);
+
+ return ce_state;
+}
+
+void ath10k_ce_deinit(struct ce_state *ce_state)
+{
+ unsigned int ce_id = ce_state->id;
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ce_state->state = CE_UNUSED;
+ ar_pci->ce_id_to_state[ce_id] = NULL;
+
+ if (ce_state->src_ring) {
+ kfree(ce_state->src_ring->shadow_base_unaligned);
+ pci_free_consistent(ar_pci->pdev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ }
+
+ if (ce_state->dest_ring) {
+ pci_free_consistent(ar_pci->pdev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
+ kfree(ce_state->dest_ring);
+ }
+ kfree(ce_state);
+}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
new file mode 100644
index 000000000000..c17f07c026f4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CE_H_
+#define _CE_H_
+
+#include "hif.h"
+
+/* Maximum number of Copy Engines supported */
+#define CE_COUNT_MAX 8
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
+
+/* Descriptor rings must be aligned to this boundary */
+#define CE_DESC_RING_ALIGN 8
+#define CE_SENDLIST_ITEMS_MAX 12
+#define CE_SEND_FLAG_GATHER 0x00010000
+
+/*
+ * Copy Engine support: low-level Target-side Copy Engine API.
+ * This is a hardware access layer used by code that understands
+ * how to use copy engines.
+ */
+
+struct ce_state;
+
+/* Copy Engine operational state */
+enum ce_op_state {
+ CE_UNUSED,
+ CE_PAUSED,
+ CE_RUNNING,
+};
+
+#define CE_DESC_FLAGS_GATHER (1 << 0)
+#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
+#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
+#define CE_DESC_FLAGS_META_DATA_LSB 3
+
+struct ce_desc {
+ __le32 addr;
+ __le16 nbytes;
+ __le16 flags; /* %CE_DESC_FLAGS_ */
+};
+
+/* Copy Engine Ring internal state */
+struct ce_ring_state {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+ /*
+ * For the dest ring, this is the next index to be processed by
+ * software once hardware has received into it.
+ *
+ * For the src ring, this is the last descriptor that was sent and
+ * whose completion has been processed by software.
+ *
+ * Regardless of src or dest ring, this is an invariant
+ * (modulo ring size):
+ * write index >= read index >= sw_index
+ */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+ /*
+ * For src ring, this is the next index not yet processed by HW.
+ * This is a cached copy of the real HW index (read index), used
+ * for avoiding reading the HW index register more often than
+ * necessary.
+ * This extends the invariant:
+ * write index >= read index >= hw_index >= sw_index
+ *
+ * For dest ring, this is currently unused.
+ */
+ /* cached copy */
+ unsigned int hw_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ u32 base_addr_ce_space_unaligned;
+
+ /*
+ * Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ u32 base_addr_ce_space;
+ /*
+ * Start of shadow copy of descriptors, within regular memory.
+ * Aligned to descriptor-size boundary.
+ */
+ void *shadow_base_unaligned;
+ struct ce_desc *shadow_base;
+
+ void **per_transfer_context;
+};
+
+/* Copy Engine internal state */
+struct ce_state {
+ struct ath10k *ar;
+ unsigned int id;
+
+ unsigned int attr_flags;
+
+ u32 ctrl_addr;
+ enum ce_op_state state;
+
+ void (*send_cb) (struct ce_state *ce_state,
+ void *per_transfer_send_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id);
+ void (*recv_cb) (struct ce_state *ce_state,
+ void *per_transfer_recv_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags);
+
+ unsigned int src_sz_max;
+ struct ce_ring_state *src_ring;
+ struct ce_ring_state *dest_ring;
+};
+
+struct ce_sendlist_item {
+ /* e.g. buffer or desc list */
+ dma_addr_t data;
+ union {
+ /* simple buffer */
+ unsigned int nbytes;
+ /* Rx descriptor list */
+ unsigned int ndesc;
+ } u;
+ /* externally-specified flags; OR-ed with internal flags */
+ u32 flags;
+};
+
+struct ce_sendlist {
+ unsigned int num_items;
+ struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+};
+
+/* Copy Engine settable attributes */
+struct ce_attr;
+
+/*==================Send====================*/
+
+/* ath10k_ce_send flags */
+#define CE_SEND_FLAG_BYTE_SWAP 1
+
+/*
+ * Queue a source buffer to be sent to an anonymous destination buffer.
+ * ce - which copy engine to use
+ * buffer - address of buffer
+ * nbytes - number of bytes to send
+ * transfer_id - arbitrary ID; reflected to destination
+ * flags - CE_SEND_FLAG_* values
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Note: If no flags are specified, use CE's default data swap mode.
+ *
+ * Implementation note: pushes 1 buffer to Source ring
+ */
+int ath10k_ce_send(struct ce_state *ce_state,
+ void *per_transfer_send_context,
+ u32 buffer,
+ unsigned int nbytes,
+ /* 14 bits */
+ unsigned int transfer_id,
+ unsigned int flags);
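+
+/*
+ * Illustrative (hypothetical) usage, not taken from this driver: a caller
+ * that has already DMA-mapped its buffer to bus address paddr might queue
+ * it with
+ *
+ * ret = ath10k_ce_send(ce_state, skb, paddr, skb->len, transfer_id, 0);
+ */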
+
+void ath10k_ce_send_cb_register(struct ce_state *ce_state,
+ void (*send_cb) (struct ce_state *ce_state,
+ void *transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id),
+ int disable_interrupts);
+
+/* Append a simple buffer (address/length) to a sendlist. */
+void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
+ u32 buffer,
+ unsigned int nbytes,
+ /* OR-ed with internal flags */
+ u32 flags);
+
+/*
+ * Queue a "sendlist" of buffers to be sent using gather to a single
+ * anonymous destination buffer
+ * ce - which copy engine to use
+ * sendlist - list of simple buffers to send using gather
+ * transfer_id - arbitrary ID; reflected to destination
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Implementation note: pushes multiple buffers with Gather to the Source ring.
+ */
+int ath10k_ce_sendlist_send(struct ce_state *ce_state,
+ void *per_transfer_send_context,
+ struct ce_sendlist *sendlist,
+ /* 14 bits */
+ unsigned int transfer_id);
+
+/*==================Recv=======================*/
+
+/*
+ * Make a buffer available to receive. The buffer must be at least as
+ * large as this copy engine's minimum destination buffer size (the
+ * src_sz_max attribute).
+ * ce - which copy engine to use
+ * per_transfer_recv_context - context passed back to caller's recv_cb
+ * buffer - address of buffer in CE space
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Implementation note: pushes a buffer to the Dest ring.
+ */
+int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+ void *per_transfer_recv_context,
+ u32 buffer);
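+
+/*
+ * Illustrative (hypothetical) usage: posting a receive buffer whose bus
+ * address is paddr could look like
+ *
+ * ret = ath10k_ce_recv_buf_enqueue(ce_state, skb, paddr);
+ */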
+
+void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
+ void (*recv_cb) (struct ce_state *ce_state,
+ void *transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags));
+
+/* recv flags */
+/* Data is byte-swapped */
+#define CE_RECV_FLAG_SWAPPED 1
+
+/*
+ * Supply data for the next completed unprocessed receive descriptor.
+ * Pops buffer from Dest ring.
+ */
+int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp);
+/*
+ * Supply data for the next completed unprocessed send descriptor.
+ * Pops 1 completed send buffer from Source ring.
+ */
+int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp);
+
+/*==================CE Engine Initialization=======================*/
+
+/* Initialize an instance of a CE */
+struct ce_state *ath10k_ce_init(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr);
+
+/*==================CE Engine Shutdown=======================*/
+/*
+ * Support clean shutdown by allowing the caller to revoke
+ * receive buffers. Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp);
+
+/*
+ * Support clean shutdown by allowing the caller to cancel
+ * pending sends. Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp);
+
+void ath10k_ce_deinit(struct ce_state *ce_state);
+
+/*==================CE Interrupt Handlers====================*/
+void ath10k_ce_per_engine_service_any(struct ath10k *ar);
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
+void ath10k_ce_disable_interrupts(struct ath10k *ar);
+
+/* ce_attr.flags values */
+/* Use NonSnooping PCIe accesses? */
+#define CE_ATTR_NO_SNOOP 1
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* Swizzle descriptors? */
+#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Attributes of an instance of a Copy Engine */
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* currently not in use */
+ unsigned int priority;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /*
+ * Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ /* Future use */
+ void *reserved;
+};
+
+/*
+ * When using sendlist_send to transfer multiple buffer fragments, the
+ * transfer context of every fragment except the last is set to
+ * CE_SENDLIST_ITEM_CTXT. ce_completed_send reports success for each
+ * completed fragment, so the upper layer can use the
+ * CE_SENDLIST_ITEM_CTXT context to distinguish intermediate fragment
+ * completions from the final one.
+ */
+#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
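+
+/*
+ * For example, a send-completion handler may skip intermediate fragments
+ * of a sendlist like this (illustrative only):
+ *
+ * if (transfer_context == CE_SENDLIST_ITEM_CTXT)
+ * continue;
+ */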
+
+#define SR_BA_ADDRESS 0x0000
+#define SR_SIZE_ADDRESS 0x0004
+#define DR_BA_ADDRESS 0x0008
+#define DR_SIZE_ADDRESS 0x000c
+#define CE_CMD_ADDRESS 0x0018
+
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
+ (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
+ CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
+ (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
+ CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
+ (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
+ CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_DMAX_LENGTH_MSB 15
+#define CE_CTRL1_DMAX_LENGTH_LSB 0
+#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
+#define CE_CTRL1_DMAX_LENGTH_GET(x) \
+ (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
+#define CE_CTRL1_DMAX_LENGTH_SET(x) \
+ (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
+
+#define CE_CTRL1_ADDRESS 0x0010
+#define CE_CTRL1_HW_MASK 0x0007ffff
+#define CE_CTRL1_SW_MASK 0x0007ffff
+#define CE_CTRL1_HW_WRITE_MASK 0x00000000
+#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
+#define CE_CTRL1_RSTMASK 0xffffffff
+#define CE_CTRL1_RESET 0x00000080
+
+#define CE_CMD_HALT_STATUS_MSB 3
+#define CE_CMD_HALT_STATUS_LSB 3
+#define CE_CMD_HALT_STATUS_MASK 0x00000008
+#define CE_CMD_HALT_STATUS_GET(x) \
+ (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
+#define CE_CMD_HALT_STATUS_SET(x) \
+ (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
+#define CE_CMD_HALT_STATUS_RESET 0
+#define CE_CMD_HALT_MSB 0
+#define CE_CMD_HALT_MASK 0x00000001
+
+#define HOST_IE_COPY_COMPLETE_MSB 0
+#define HOST_IE_COPY_COMPLETE_LSB 0
+#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
+#define HOST_IE_COPY_COMPLETE_GET(x) \
+ (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
+#define HOST_IE_COPY_COMPLETE_SET(x) \
+ (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
+#define HOST_IE_COPY_COMPLETE_RESET 0
+#define HOST_IE_ADDRESS 0x002c
+
+#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
+#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
+#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
+#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
+#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
+#define HOST_IS_ADDRESS 0x0030
+
+#define MISC_IE_ADDRESS 0x0034
+
+#define MISC_IS_AXI_ERR_MASK 0x00000400
+
+#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
+#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
+#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
+#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
+#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
+
+#define MISC_IS_ADDRESS 0x0038
+
+#define SR_WR_INDEX_ADDRESS 0x003c
+
+#define DST_WR_INDEX_ADDRESS 0x0040
+
+#define CURRENT_SRRI_ADDRESS 0x0044
+
+#define CURRENT_DRRI_ADDRESS 0x0048
+
+#define SRC_WATERMARK_LOW_MSB 31
+#define SRC_WATERMARK_LOW_LSB 16
+#define SRC_WATERMARK_LOW_MASK 0xffff0000
+#define SRC_WATERMARK_LOW_GET(x) \
+ (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
+#define SRC_WATERMARK_LOW_SET(x) \
+ (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
+#define SRC_WATERMARK_LOW_RESET 0
+#define SRC_WATERMARK_HIGH_MSB 15
+#define SRC_WATERMARK_HIGH_LSB 0
+#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
+#define SRC_WATERMARK_HIGH_GET(x) \
+ (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
+#define SRC_WATERMARK_HIGH_SET(x) \
+ (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
+#define SRC_WATERMARK_HIGH_RESET 0
+#define SRC_WATERMARK_ADDRESS 0x004c
+
+#define DST_WATERMARK_LOW_LSB 16
+#define DST_WATERMARK_LOW_MASK 0xffff0000
+#define DST_WATERMARK_LOW_SET(x) \
+ (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
+#define DST_WATERMARK_LOW_RESET 0
+#define DST_WATERMARK_HIGH_MSB 15
+#define DST_WATERMARK_HIGH_LSB 0
+#define DST_WATERMARK_HIGH_MASK 0x0000ffff
+#define DST_WATERMARK_HIGH_GET(x) \
+ (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
+#define DST_WATERMARK_HIGH_SET(x) \
+ (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
+#define DST_WATERMARK_HIGH_RESET 0
+#define DST_WATERMARK_ADDRESS 0x0050
+
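+/*
+ * Each CE has its own register block; CE n's registers start at
+ * CE0_BASE_ADDRESS + n * (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS).
+ */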
+static inline u32 ath10k_ce_base_address(unsigned int ce_id)
+{
+ return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
+}
+
+#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
+ HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
+ HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
+ HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
+
+#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
+ MISC_IS_DST_ADDR_ERR_MASK | \
+ MISC_IS_SRC_LEN_ERR_MASK | \
+ MISC_IS_DST_MAX_LEN_VIO_MASK | \
+ MISC_IS_DST_RING_OVERFLOW_MASK | \
+ MISC_IS_SRC_RING_OVERFLOW_MASK)
+
+#define CE_SRC_RING_TO_DESC(baddr, idx) \
+ (&(((struct ce_desc *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC(baddr, idx) \
+ (&(((struct ce_desc *)baddr)[idx]))
+
+/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
+#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
+ (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
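+
+/*
+ * Example: with an 8-entry ring (nentries_mask = 7),
+ * CE_RING_IDX_INCR(7, 7) == 0 and CE_RING_DELTA(7, 6, 1) == 3.
+ */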
+
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
+ (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
+ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
+#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
+
+#define CE_INTERRUPT_SUMMARY(ar) \
+ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
+ ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
+ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
+
+#endif /* _CE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
new file mode 100644
index 000000000000..2b3426b1ff3f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include "core.h"
+#include "mac.h"
+#include "htc.h"
+#include "hif.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "debug.h"
+#include "htt.h"
+
+unsigned int ath10k_debug_mask;
+static bool uart_print;
+static unsigned int ath10k_p2p;
+module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param(uart_print, bool, 0644);
+module_param_named(p2p, ath10k_p2p, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+MODULE_PARM_DESC(uart_print, "Uart target debugging");
+MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
+
+static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ {
+ .id = QCA988X_HW_1_0_VERSION,
+ .name = "qca988x hw1.0",
+ .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
+ .fw = {
+ .dir = QCA988X_HW_1_0_FW_DIR,
+ .fw = QCA988X_HW_1_0_FW_FILE,
+ .otp = QCA988X_HW_1_0_OTP_FILE,
+ .board = QCA988X_HW_1_0_BOARD_DATA_FILE,
+ },
+ },
+ {
+ .id = QCA988X_HW_2_0_VERSION,
+ .name = "qca988x hw2.0",
+ .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+ .fw = {
+ .dir = QCA988X_HW_2_0_FW_DIR,
+ .fw = QCA988X_HW_2_0_FW_FILE,
+ .otp = QCA988X_HW_2_0_OTP_FILE,
+ .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
+ },
+ },
+};
+
+static void ath10k_send_suspend_complete(struct ath10k *ar)
+{
+ ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
+
+ ar->is_target_paused = true;
+ wake_up(&ar->event_queue);
+}
+
+static int ath10k_check_fw_version(struct ath10k *ar)
+{
+ char version[32];
+
+ if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
+ ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
+ ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
+ ar->fw_version_build >= SUPPORTED_FW_BUILD)
+ return 0;
+
+ snprintf(version, sizeof(version), "%u.%u.%u.%u",
+ SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
+ SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
+
+ ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
+ ar->hw->wiphy->fw_version);
+ ath10k_warn("Please upgrade to version %s (or newer)\n", version);
+
+ return 0;
+}
+
+static int ath10k_init_connect_htc(struct ath10k *ar)
+{
+ int status;
+
+ status = ath10k_wmi_connect_htc_service(ar);
+ if (status)
+ goto conn_fail;
+
+ /* Start HTC */
+ status = ath10k_htc_start(ar->htc);
+ if (status)
+ goto conn_fail;
+
+ /* Wait for WMI event to be ready */
+ status = ath10k_wmi_wait_for_service_ready(ar);
+ if (status <= 0) {
+ ath10k_warn("wmi service ready event not received\n");
+ status = -ETIMEDOUT;
+ goto timeout;
+ }
+
+ ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
+ return 0;
+
+timeout:
+ ath10k_htc_stop(ar->htc);
+conn_fail:
+ return status;
+}
+
+static int ath10k_init_configure_target(struct ath10k *ar)
+{
+ u32 param_host;
+ int ret;
+
+ /* tell target which HTC version is used */
+ ret = ath10k_bmi_write32(ar, hi_app_host_interest,
+ HTC_PROTOCOL_VERSION);
+ if (ret) {
+ ath10k_err("setting HTC version failed\n");
+ return ret;
+ }
+
+ /* set the firmware mode to STA/IBSS/AP */
+ ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
+ if (ret) {
+ ath10k_err("setting firmware mode (1/2) failed\n");
+ return ret;
+ }
+
+ /* TODO: the following parameters need to be revisited */
+ /* num_device */
+ param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT);
+ /* Firmware mode */
+ /* FIXME: why FW_MODE_AP? */
+ param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
+ /* mac_addr_method */
+ param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+ /* firmware_bridge */
+ param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+ /* fwsubmode */
+ param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);
+
+ ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
+ if (ret) {
+ ath10k_err("setting firmware mode (2/2) failed\n");
+ return ret;
+ }
+
+ /* We do all byte-swapping on the host */
+ ret = ath10k_bmi_write32(ar, hi_be, 0);
+ if (ret) {
+ ath10k_err("setting host CPU BE mode failed\n");
+ return ret;
+ }
+
+ /* FW descriptor/Data swap flags */
+ ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
+
+ if (ret) {
+ ath10k_err("setting FW data/desc swap flags failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
+ const char *dir,
+ const char *file)
+{
+ char filename[100];
+ const struct firmware *fw;
+ int ret;
+
+ if (file == NULL)
+ return ERR_PTR(-ENOENT);
+
+ if (dir == NULL)
+ dir = ".";
+
+ snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+ ret = request_firmware(&fw, filename, ar->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fw;
+}
+
+static int ath10k_push_board_ext_data(struct ath10k *ar,
+ const struct firmware *fw)
+{
+ u32 board_data_size = QCA988X_BOARD_DATA_SZ;
+ u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
+ u32 board_ext_data_addr;
+ int ret;
+
+ ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
+ if (ret) {
+ ath10k_err("could not read board ext data addr (%d)\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ATH10K_DBG_CORE,
+ "board ext data download addr: 0x%x\n",
+ board_ext_data_addr);
+
+ if (board_ext_data_addr == 0)
+ return 0;
+
+ if (fw->size != (board_data_size + board_ext_data_size)) {
+ ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
+ fw->size, board_data_size, board_ext_data_size);
+ return -EINVAL;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
+ fw->data + board_data_size,
+ board_ext_data_size);
+ if (ret) {
+ ath10k_err("could not write board ext data (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
+ (board_ext_data_size << 16) | 1);
+ if (ret) {
+ ath10k_err("could not write board ext data bit (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_download_board_data(struct ath10k *ar)
+{
+ u32 board_data_size = QCA988X_BOARD_DATA_SZ;
+ u32 address;
+ const struct firmware *fw;
+ int ret;
+
+ fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(fw)) {
+ ath10k_err("could not fetch board data fw file (%ld)\n",
+ PTR_ERR(fw));
+ return PTR_ERR(fw);
+ }
+
+ ret = ath10k_push_board_ext_data(ar, fw);
+ if (ret) {
+ ath10k_err("could not push board ext data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_read32(ar, hi_board_data, &address);
+ if (ret) {
+ ath10k_err("could not read board data addr (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, address, fw->data,
+ min_t(u32, board_data_size, fw->size));
+ if (ret) {
+ ath10k_err("could not write board data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+ if (ret) {
+ ath10k_err("could not write board data bit (%d)\n", ret);
+ goto exit;
+ }
+
+exit:
+ release_firmware(fw);
+ return ret;
+}
+
+static int ath10k_download_and_run_otp(struct ath10k *ar)
+{
+ const struct firmware *fw;
+ u32 address;
+ u32 exec_param;
+ int ret;
+
+ /* OTP is optional */
+
+ if (ar->hw_params.fw.otp == NULL) {
+ ath10k_info("otp file not defined\n");
+ return 0;
+ }
+
+ address = ar->hw_params.patch_load_addr;
+
+ fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ ar->hw_params.fw.otp);
+ if (IS_ERR(fw)) {
+ ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
+ return 0;
+ }
+
+ ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+ if (ret) {
+ ath10k_err("could not write otp (%d)\n", ret);
+ goto exit;
+ }
+
+ exec_param = 0;
+ ret = ath10k_bmi_execute(ar, address, &exec_param);
+ if (ret) {
+ ath10k_err("could not execute otp (%d)\n", ret);
+ goto exit;
+ }
+
+exit:
+ release_firmware(fw);
+ return ret;
+}
+
+static int ath10k_download_fw(struct ath10k *ar)
+{
+ const struct firmware *fw;
+ u32 address;
+ int ret;
+
+ if (ar->hw_params.fw.fw == NULL)
+ return -EINVAL;
+
+ address = ar->hw_params.patch_load_addr;
+
+ fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ ar->hw_params.fw.fw);
+ if (IS_ERR(fw)) {
+ ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
+ return PTR_ERR(fw);
+ }
+
+ ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+ if (ret) {
+ ath10k_err("could not write fw (%d)\n", ret);
+ goto exit;
+ }
+
+exit:
+ release_firmware(fw);
+ return ret;
+}
+
+static int ath10k_init_download_firmware(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_download_board_data(ar);
+ if (ret)
+ return ret;
+
+ ret = ath10k_download_and_run_otp(ar);
+ if (ret)
+ return ret;
+
+ ret = ath10k_download_fw(ar);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int ath10k_init_uart(struct ath10k *ar)
+{
+ int ret;
+
+ /*
+ * Explicitly set UART prints to zero, since the target turns them on
+ * based on scratch registers.
+ */
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
+ if (ret) {
+ ath10k_warn("could not disable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ if (!uart_print) {
+ ath10k_info("UART prints disabled\n");
+ return 0;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
+ if (ret) {
+ ath10k_warn("could not enable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
+ if (ret) {
+ ath10k_warn("could not enable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ ath10k_info("UART prints enabled\n");
+ return 0;
+}
+
+static int ath10k_init_hw_params(struct ath10k *ar)
+{
+ const struct ath10k_hw_params *uninitialized_var(hw_params);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
+ hw_params = &ath10k_hw_params_list[i];
+
+ if (hw_params->id == ar->target_version)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
+ ath10k_err("Unsupported hardware version: 0x%x\n",
+ ar->target_version);
+ return -EINVAL;
+ }
+
+ ar->hw_params = *hw_params;
+
+ ath10k_info("Hardware name %s version 0x%x\n",
+ ar->hw_params.name, ar->target_version);
+
+ return 0;
+}
+
+struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
+ enum ath10k_bus bus,
+ const struct ath10k_hif_ops *hif_ops)
+{
+ struct ath10k *ar;
+
+ ar = ath10k_mac_create();
+ if (!ar)
+ return NULL;
+
+ ar->ath_common.priv = ar;
+ ar->ath_common.hw = ar->hw;
+
+ ar->p2p = !!ath10k_p2p;
+ ar->dev = dev;
+
+ ar->hif.priv = hif_priv;
+ ar->hif.ops = hif_ops;
+ ar->hif.bus = bus;
+
+ ar->free_vdev_map = 0xFF; /* 8 vdevs */
+
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->vdev_setup_done);
+
+ setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
+
+ ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+ if (!ar->workqueue)
+ goto err_wq;
+
+ mutex_init(&ar->conf_mutex);
+ spin_lock_init(&ar->data_lock);
+
+ INIT_LIST_HEAD(&ar->peers);
+ init_waitqueue_head(&ar->peer_mapping_wq);
+
+ init_completion(&ar->offchan_tx_completed);
+ INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+ skb_queue_head_init(&ar->offchan_tx_queue);
+
+ init_waitqueue_head(&ar->event_queue);
+
+ return ar;
+
+err_wq:
+ ath10k_mac_destroy(ar);
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+
+void ath10k_core_destroy(struct ath10k *ar)
+{
+ flush_workqueue(ar->workqueue);
+ destroy_workqueue(ar->workqueue);
+
+ ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
+
+int ath10k_core_register(struct ath10k *ar)
+{
+ struct ath10k_htc_ops htc_ops;
+ struct bmi_target_info target_info;
+ int status;
+
+ memset(&target_info, 0, sizeof(target_info));
+ status = ath10k_bmi_get_target_info(ar, &target_info);
+ if (status)
+ goto err;
+
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+
+ status = ath10k_init_hw_params(ar);
+ if (status)
+ goto err;
+
+ if (ath10k_init_configure_target(ar)) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ status = ath10k_init_download_firmware(ar);
+ if (status)
+ goto err;
+
+ status = ath10k_init_uart(ar);
+ if (status)
+ goto err;
+
+ htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete;
+
+ ar->htc = ath10k_htc_create(ar, &htc_ops);
+ if (IS_ERR(ar->htc)) {
+ status = PTR_ERR(ar->htc);
+ ath10k_err("could not create HTC (%d)\n", status);
+ goto err;
+ }
+
+ status = ath10k_bmi_done(ar);
+ if (status)
+ goto err_htc_destroy;
+
+ status = ath10k_wmi_attach(ar);
+ if (status) {
+ ath10k_err("WMI attach failed: %d\n", status);
+ goto err_htc_destroy;
+ }
+
+ status = ath10k_htc_wait_target(ar->htc);
+ if (status)
+ goto err_wmi_detach;
+
+ ar->htt = ath10k_htt_attach(ar);
+ if (!ar->htt) {
+ status = -ENOMEM;
+ goto err_wmi_detach;
+ }
+
+ status = ath10k_init_connect_htc(ar);
+ if (status)
+ goto err_htt_detach;
+
+ ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
+
+ status = ath10k_check_fw_version(ar);
+ if (status)
+ goto err_disconnect_htc;
+
+ status = ath10k_wmi_cmd_init(ar);
+ if (status) {
+ ath10k_err("could not send WMI init command (%d)\n", status);
+ goto err_disconnect_htc;
+ }
+
+ status = ath10k_wmi_wait_for_unified_ready(ar);
+ if (status <= 0) {
+ ath10k_err("wmi unified ready event not received\n");
+ status = -ETIMEDOUT;
+ goto err_disconnect_htc;
+ }
+
+ status = ath10k_htt_attach_target(ar->htt);
+ if (status)
+ goto err_disconnect_htc;
+
+ status = ath10k_mac_register(ar);
+ if (status)
+ goto err_disconnect_htc;
+
+ status = ath10k_debug_create(ar);
+ if (status) {
+ ath10k_err("unable to initialize debugfs\n");
+ goto err_unregister_mac;
+ }
+
+ return 0;
+
+err_unregister_mac:
+ ath10k_mac_unregister(ar);
+err_disconnect_htc:
+ ath10k_htc_stop(ar->htc);
+err_htt_detach:
+ ath10k_htt_detach(ar->htt);
+err_wmi_detach:
+ ath10k_wmi_detach(ar);
+err_htc_destroy:
+ ath10k_htc_destroy(ar->htc);
+err:
+ return status;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+ /* We must unregister from mac80211 before we stop HTC and HIF.
+ * Otherwise we will fail to submit commands to FW and mac80211 will be
+ * unhappy about callback failures. */
+ ath10k_mac_unregister(ar);
+ ath10k_htc_stop(ar->htc);
+ ath10k_htt_detach(ar->htt);
+ ath10k_wmi_detach(ar);
+ ath10k_htc_destroy(ar->htc);
+}
+EXPORT_SYMBOL(ath10k_core_unregister);
+
+int ath10k_core_target_suspend(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ATH10K_DBG_CORE, "%s: called\n", __func__);
+
+ ret = ath10k_wmi_pdev_suspend_target(ar);
+ if (ret)
+ ath10k_warn("could not suspend target (%d)\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_core_target_suspend);
+
+int ath10k_core_target_resume(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ATH10K_DBG_CORE, "%s: called\n", __func__);
+
+ ret = ath10k_wmi_pdev_resume_target(ar);
+ if (ret)
+ ath10k_warn("could not resume target (%d)\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_core_target_resume);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
new file mode 100644
index 000000000000..539336d1be4b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "htc.h"
+#include "hw.h"
+#include "targaddrs.h"
+#include "wmi.h"
+#include "../ath.h"
+#include "../regd.h"
+
+#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+#define WO(_f) ((_f##_OFFSET) >> 2)
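+
+/*
+ * MS()/SM() extract and build bit-fields from the usual _MASK/_LSB macro
+ * pairs, e.g. MS(flags, CE_DESC_FLAGS_META_DATA) yields the metadata field
+ * of a CE descriptor's flags word.
+ */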
+
+#define ATH10K_SCAN_ID 0
+#define WMI_READY_TIMEOUT (5 * HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
+
+/* Antenna noise floor */
+#define ATH10K_DEFAULT_NOISE_FLOOR -95
+
+struct ath10k;
+
+enum ath10k_bus {
+ ATH10K_BUS_PCI,
+};
+
+struct ath10k_skb_cb {
+ dma_addr_t paddr;
+ bool is_mapped;
+ bool is_aborted;
+
+ struct {
+ u8 vdev_id;
+ u16 msdu_id;
+ u8 tid;
+ bool is_offchan;
+ bool is_conf;
+ bool discard;
+ bool no_ack;
+ u8 refcount;
+ struct sk_buff *txfrag;
+ struct sk_buff *msdu;
+ } __packed htt;
+
+ /* 4 bytes left on 64bit arch */
+} __packed;
+
+static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+ return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
+{
+ if (ATH10K_SKB_CB(skb)->is_mapped)
+ return -EINVAL;
+
+ ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
+ return -EIO;
+
+ ATH10K_SKB_CB(skb)->is_mapped = true;
+ return 0;
+}
+
+static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
+{
+ if (!ATH10K_SKB_CB(skb)->is_mapped)
+ return -EINVAL;
+
+ dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
+ DMA_TO_DEVICE);
+ ATH10K_SKB_CB(skb)->is_mapped = false;
+ return 0;
+}
+
+static inline u32 host_interest_item_address(u32 item_offset)
+{
+ return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
+}
+
+struct ath10k_bmi {
+ bool done_sent;
+};
+
+struct ath10k_wmi {
+ enum ath10k_htc_ep_id eid;
+ struct completion service_ready;
+ struct completion unified_ready;
+ atomic_t pending_tx_count;
+ wait_queue_head_t wq;
+
+ struct sk_buff_head wmi_event_list;
+ struct work_struct wmi_event_work;
+};
+
+struct ath10k_peer_stat {
+ u8 peer_macaddr[ETH_ALEN];
+ u32 peer_rssi;
+ u32 peer_tx_rate;
+};
+
+struct ath10k_target_stats {
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ u32 tx_frame_count;
+ u32 rx_frame_count;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+
+ /* PDEV TX stats */
+ s32 comp_queued;
+ s32 comp_delivered;
+ s32 msdu_enqued;
+ s32 mpdu_enqued;
+ s32 wmm_drop;
+ s32 local_enqued;
+ s32 local_freed;
+ s32 hw_queued;
+ s32 hw_reaped;
+ s32 underrun;
+ s32 tx_abort;
+ s32 mpdus_requed;
+ u32 tx_ko;
+ u32 data_rc;
+ u32 self_triggers;
+ u32 sw_retry_failure;
+ u32 illgl_rate_phy_err;
+ u32 pdev_cont_xretry;
+ u32 pdev_tx_timeout;
+ u32 pdev_resets;
+ u32 phy_underrun;
+ u32 txop_ovf;
+
+ /* PDEV RX stats */
+ s32 mid_ppdu_route_change;
+ s32 status_rcvd;
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ s32 oversize_amsdu;
+ s32 phy_errs;
+ s32 phy_err_drop;
+ s32 mpdu_errs;
+
+ /* VDEV STATS */
+
+ /* PEER STATS */
+ u8 peers;
+ struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
+
+ /* TODO: Beacon filter stats */
+
+};
+
+#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
+
+struct ath10k_peer {
+ struct list_head list;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+};
+
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+
+struct ath10k_vif {
+ u32 vdev_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+
+ struct ath10k *ar;
+ struct ieee80211_vif *vif;
+
+ struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
+ u8 def_wep_key_index;
+
+ u16 tx_seq_no;
+
+ union {
+ struct {
+ u8 bssid[ETH_ALEN];
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 127 stations; wmi limit */
+ u8 tim_bitmap[16];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ struct {
+ u8 bssid[ETH_ALEN];
+ } ibss;
+ } u;
+};
+
+struct ath10k_vif_iter {
+ u32 vdev_id;
+ struct ath10k_vif *arvif;
+};
+
+struct ath10k_debug {
+ struct dentry *debugfs_phy;
+
+ struct ath10k_target_stats target_stats;
+ u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+
+ struct completion event_stats_compl;
+};
+
+struct ath10k {
+ struct ath_common ath_common;
+ struct ieee80211_hw *hw;
+ struct device *dev;
+ u8 mac_addr[ETH_ALEN];
+
+ u32 target_version;
+ u8 fw_version_major;
+ u32 fw_version_minor;
+ u16 fw_version_release;
+ u16 fw_version_build;
+ u32 phy_capability;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+
+ struct targetdef *targetdef;
+ struct hostdef *hostdef;
+
+ bool p2p;
+
+ struct {
+ void *priv;
+ enum ath10k_bus bus;
+ const struct ath10k_hif_ops *ops;
+ } hif;
+
+ struct ath10k_wmi wmi;
+
+ wait_queue_head_t event_queue;
+ bool is_target_paused;
+
+ struct ath10k_bmi bmi;
+
+ struct ath10k_htc *htc;
+ struct ath10k_htt *htt;
+
+ struct ath10k_hw_params {
+ u32 id;
+ const char *name;
+ u32 patch_load_addr;
+
+ struct ath10k_hw_params_fw {
+ const char *dir;
+ const char *fw;
+ const char *otp;
+ const char *board;
+ } fw;
+ } hw_params;
+
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct timer_list timeout;
+ bool is_roc;
+ bool in_progress;
+ bool aborting;
+ int vdev_id;
+ int roc_freq;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ } mac;
+
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ int free_vdev_map;
+ int monitor_vdev_id;
+ bool monitor_enabled;
+ bool monitor_present;
+ unsigned int filter_flags;
+
+ struct wmi_pdev_set_wmm_params_arg wmm_params;
+ struct completion install_key_done;
+
+ struct completion vdev_setup_done;
+
+ struct workqueue_struct *workqueue;
+
+ /* prevents concurrent FW reconfiguration */
+ struct mutex conf_mutex;
+
+ /* protects shared structure data */
+ spinlock_t data_lock;
+
+ struct list_head peers;
+ wait_queue_head_t peer_mapping_wq;
+
+ struct work_struct offchan_tx_work;
+ struct sk_buff_head offchan_tx_queue;
+ struct completion offchan_tx_completed;
+ struct sk_buff *offchan_tx_skb;
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+ struct ath10k_debug debug;
+#endif
+};
+
+struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
+ enum ath10k_bus bus,
+ const struct ath10k_hif_ops *hif_ops);
+void ath10k_core_destroy(struct ath10k *ar);
+
+int ath10k_core_register(struct ath10k *ar);
+void ath10k_core_unregister(struct ath10k *ar);
+
+int ath10k_core_target_suspend(struct ath10k *ar);
+int ath10k_core_target_resume(struct ath10k *ar);
+
+#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
new file mode 100644
index 000000000000..499034b873d1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#include "core.h"
+#include "debug.h"
+
+static int ath10k_printk(const char *level, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int rtn;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ rtn = printk("%sath10k: %pV", level, &vaf);
+
+ va_end(args);
+
+ return rtn;
+}
+
+int ath10k_info(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
+ trace_ath10k_log_info(&vaf);
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_info);
+
+int ath10k_err(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
+ trace_ath10k_log_err(&vaf);
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_err);
+
+int ath10k_warn(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+ int ret = 0;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ if (net_ratelimit())
+ ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);
+
+ trace_ath10k_log_warn(&vaf);
+
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_warn);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+
+void ath10k_debug_read_service_map(struct ath10k *ar,
+ void *service_map,
+ size_t map_size)
+{
+ memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
+}
+
+static ssize_t ath10k_read_wmi_services(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ const char *status;
+ ssize_t ret_cnt;
+ int i;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ for (i = 0; i < WMI_SERVICE_LAST; i++) {
+ if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
+ status = "enabled";
+ else
+ status = "disabled";
+
+ len += scnprintf(buf + len, buf_len - len,
+ "0x%02x - %20s - %s\n",
+ i, wmi_service_name(i), status);
+ }
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_wmi_services = {
+ .read = ath10k_read_wmi_services,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath10k_debug_read_target_stats(struct ath10k *ar,
+ struct wmi_stats_event *ev)
+{
+ u8 *tmp = ev->data;
+ struct ath10k_target_stats *stats;
+ int num_pdev_stats, num_vdev_stats, num_peer_stats;
+ struct wmi_pdev_stats *ps;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ stats = &ar->debug.target_stats;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
+
+ if (num_pdev_stats) {
+ ps = (struct wmi_pdev_stats *)tmp;
+
+ stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
+ stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
+ stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
+ stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
+ stats->cycle_count = __le32_to_cpu(ps->cycle_count);
+ stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
+ stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
+
+ stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
+ stats->comp_delivered =
+ __le32_to_cpu(ps->wal.tx.comp_delivered);
+ stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
+ stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
+ stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
+ stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
+ stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
+ stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
+ stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
+ stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
+ stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
+ stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
+ stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
+ stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
+ stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
+ stats->sw_retry_failure =
+ __le32_to_cpu(ps->wal.tx.sw_retry_failure);
+ stats->illgl_rate_phy_err =
+ __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
+ stats->pdev_cont_xretry =
+ __le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
+ stats->pdev_tx_timeout =
+ __le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
+ stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
+ stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
+ stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
+
+ stats->mid_ppdu_route_change =
+ __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
+ stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
+ stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
+ stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
+ stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
+ stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
+ stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
+ stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
+ stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
+ stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
+ stats->oversize_amsdu =
+ __le32_to_cpu(ps->wal.rx.oversize_amsdu);
+ stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
+ stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
+ stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
+
+ tmp += sizeof(struct wmi_pdev_stats);
+ }
+
+ /* 0 or max vdevs */
+ /* Currently firmware does not support VDEV stats */
+ if (num_vdev_stats) {
+ struct wmi_vdev_stats *vdev_stats;
+
+ for (i = 0; i < num_vdev_stats; i++) {
+ vdev_stats = (struct wmi_vdev_stats *)tmp;
+ tmp += sizeof(struct wmi_vdev_stats);
+ }
+ }
+
+ if (num_peer_stats) {
+ struct wmi_peer_stats *peer_stats;
+ struct ath10k_peer_stat *s;
+
+ stats->peers = num_peer_stats;
+
+ for (i = 0; i < num_peer_stats; i++) {
+ peer_stats = (struct wmi_peer_stats *)tmp;
+ s = &stats->peer_stat[i];
+
+ WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
+ s->peer_macaddr);
+ s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
+ s->peer_tx_rate =
+ __le32_to_cpu(peer_stats->peer_tx_rate);
+
+ tmp += sizeof(struct wmi_peer_stats);
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ complete(&ar->debug.event_stats_compl);
+}
+
+static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ struct ath10k_target_stats *fw_stats;
+ char *buf;
+ unsigned int len = 0, buf_len = 2500;
+ ssize_t ret_cnt;
+ long left;
+ int i;
+ int ret;
+
+ fw_stats = &ar->debug.target_stats;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
+ if (ret) {
+ ath10k_warn("could not request stats (%d)\n", ret);
+ kfree(buf);
+ return -EIO;
+ }
+
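+ /* ath10k_debug_read_target_stats() completes event_stats_compl when
+ * the WMI stats event arrives; give the firmware up to one second */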
+ left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
+
+ if (left <= 0) {
+ kfree(buf);
+ return -ETIMEDOUT;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", fw_stats->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", fw_stats->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", fw_stats->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", fw_stats->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", fw_stats->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", fw_stats->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", fw_stats->phy_err_count);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", fw_stats->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", fw_stats->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", fw_stats->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", fw_stats->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", fw_stats->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", fw_stats->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", fw_stats->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", fw_stats->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", fw_stats->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", fw_stats->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", fw_stats->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requed", fw_stats->mpdus_requed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Excessive retries", fw_stats->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW rate", fw_stats->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Sched self tiggers", fw_stats->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Dropped due to SW retries",
+ fw_stats->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Illegal rate phy errors",
+ fw_stats->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Pdev continous xretry", fw_stats->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "TX timeout", fw_stats->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PDEV resets", fw_stats->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY underrun", fw_stats->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU is more than txop limit", fw_stats->txop_ovf);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ fw_stats->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", fw_stats->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", fw_stats->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", fw_stats->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", fw_stats->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", fw_stats->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", fw_stats->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", fw_stats->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", fw_stats->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", fw_stats->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", fw_stats->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", fw_stats->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", fw_stats->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PEER stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ for (i = 0; i < fw_stats->peers; i++) {
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "Peer MAC address",
+ fw_stats->peer_stat[i].peer_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer TX rate",
+ fw_stats->peer_stat[i].peer_tx_rate);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fw_stats = {
+ .read = ath10k_read_fw_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath10k_debug_create(struct ath10k *ar)
+{
+ ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
+ ar->hw->wiphy->debugfsdir);
+
+ if (!ar->debug.debugfs_phy)
+ return -ENOMEM;
+
+ init_completion(&ar->debug.event_stats_compl);
+
+ debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
+ &fops_fw_stats);
+
+ debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
+ &fops_wmi_services);
+
+ return 0;
+}
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
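+ /* the trace point below fires regardless of ath10k_debug_mask; only
+ * the printk is gated by the mask */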
+ if (ath10k_debug_mask & mask)
+ ath10k_printk(KERN_DEBUG, "%pV", &vaf);
+
+ trace_ath10k_log_dbg(mask, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_dbg);
+
+void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ if (ath10k_debug_mask & mask) {
+ if (msg)
+ ath10k_dbg(mask, "%s\n", msg);
+
+ print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
+ }
+
+ /* tracing code doesn't like null strings :/ */
+ trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
+ buf, len);
+}
+EXPORT_SYMBOL(ath10k_dbg_dump);
+
+#endif /* CONFIG_ATH10K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
new file mode 100644
index 000000000000..168140c54028
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DEBUG_H_
+#define _DEBUG_H_
+
+#include <linux/types.h>
+#include "trace.h"
+
+enum ath10k_debug_mask {
+ ATH10K_DBG_PCI = 0x00000001,
+ ATH10K_DBG_WMI = 0x00000002,
+ ATH10K_DBG_HTC = 0x00000004,
+ ATH10K_DBG_HTT = 0x00000008,
+ ATH10K_DBG_MAC = 0x00000010,
+ ATH10K_DBG_CORE = 0x00000020,
+ ATH10K_DBG_PCI_DUMP = 0x00000040,
+ ATH10K_DBG_HTT_DUMP = 0x00000080,
+ ATH10K_DBG_MGMT = 0x00000100,
+ ATH10K_DBG_DATA = 0x00000200,
+ ATH10K_DBG_ANY = 0xffffffff,
+};
+
+extern unsigned int ath10k_debug_mask;
+
+extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
+extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
+extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_read_service_map(struct ath10k *ar,
+ void *service_map,
+ size_t map_size);
+void ath10k_debug_read_target_stats(struct ath10k *ar,
+ struct wmi_stats_event *ev);
+
+#else
+static inline int ath10k_debug_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_read_service_map(struct ath10k *ar,
+ void *service_map,
+ size_t map_size)
+{
+}
+
+static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
+ struct wmi_stats_event *ev)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+ const char *fmt, ...);
+void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH10K_DEBUG */
+
+static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUG */
+#endif /* _DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
new file mode 100644
index 000000000000..73a24d44d1b4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#include <linux/kernel.h>
+#include "core.h"
+
+struct ath10k_hif_cb {
+ int (*tx_completion)(struct ath10k *ar,
+ struct sk_buff *wbuf,
+ unsigned transfer_id);
+ int (*rx_completion)(struct ath10k *ar,
+ struct sk_buff *wbuf,
+ u8 pipe_id);
+};
+
+struct ath10k_hif_ops {
+ /* Send the head of a buffer to HIF for transmission to the target. */
+ int (*send_head)(struct ath10k *ar, u8 pipe_id,
+ unsigned int transfer_id,
+ unsigned int nbytes,
+ struct sk_buff *buf);
+
+ /*
+ * API to handle HIF-specific BMI message exchanges, this API is
+ * synchronous and only allowed to be called from a context that
+ * can block (sleep)
+ */
+ int (*exchange_bmi_msg)(struct ath10k *ar,
+ void *request, u32 request_len,
+ void *response, u32 *response_len);
+
+ int (*start)(struct ath10k *ar);
+
+ void (*stop)(struct ath10k *ar);
+
+ int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe,
+ int *ul_is_polled, int *dl_is_polled);
+
+ void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
+
+ /*
+ * Check if prior sends have completed.
+ *
+ * Check whether the pipe in question has any completed
+ * sends that have not yet been processed.
+ * This function is only relevant for HIF pipes that are configured
+ * to be polled rather than interrupt-driven.
+ */
+ void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
+
+ void (*init)(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks);
+
+ u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+};
+
+
+static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
+ unsigned int transfer_id,
+ unsigned int nbytes,
+ struct sk_buff *buf)
+{
+ return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
+}
+
+static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *request, u32 request_len,
+ void *response, u32 *response_len)
+{
+ return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
+ response, response_len);
+}
+
+static inline int ath10k_hif_start(struct ath10k *ar)
+{
+ return ar->hif.ops->start(ar);
+}
+
+static inline void ath10k_hif_stop(struct ath10k *ar)
+{
+ return ar->hif.ops->stop(ar);
+}
+
+static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe,
+ int *ul_is_polled,
+ int *dl_is_polled)
+{
+ return ar->hif.ops->map_service_to_pipe(ar, service_id,
+ ul_pipe, dl_pipe,
+ ul_is_polled, dl_is_polled);
+}
+
+static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
+ u8 pipe_id, int force)
+{
+ ar->hif.ops->send_complete_check(ar, pipe_id, force);
+}
+
+static inline void ath10k_hif_init(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks)
+{
+ ar->hif.ops->init(ar, callbacks);
+}
+
+static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
+ u8 pipe_id)
+{
+ return ar->hif.ops->get_free_queue_number(ar, pipe_id);
+}
+
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
new file mode 100644
index 000000000000..74363c949392
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+
+/********/
+/* Send */
+/********/
+
+static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
+ int force)
+{
+ /*
+ * Check whether HIF has any prior sends that have finished
+ * but have not yet had their post-processing done.
+ */
+ ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
+}
+
+static void ath10k_htc_control_tx_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb) {
+ ath10k_warn("Unable to allocate ctrl skb\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ skb_cb = ATH10K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+ return skb;
+}
+
+static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
+ struct sk_buff *skb)
+{
+ ath10k_skb_unmap(htc->ar->dev, skb);
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+}
+
+static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+ ep->eid, skb);
+
+ ath10k_htc_restore_tx_skb(ep->htc, skb);
+
+ if (!ep->ep_ops.ep_tx_complete) {
+ ath10k_warn("no tx handler for eid %d\n", ep->eid);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
+}
+
+/* assumes tx_lock is held */
+static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
+{
+ if (!ep->tx_credit_flow_enabled)
+ return false;
+ if (ep->tx_credits >= ep->tx_credits_per_max_message)
+ return false;
+
+ ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
+ ep->eid);
+ return true;
+}
+
+static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc_hdr *hdr;
+
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+ memset(hdr, 0, sizeof(*hdr));
+
+ hdr->eid = ep->eid;
+ hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->seq_no = ep->seq_no++;
+
+ if (ath10k_htc_ep_need_credit_update(ep))
+ hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
+
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
+ struct ath10k_htc_ep *ep,
+ struct sk_buff *skb,
+ u8 credits)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ int ret;
+
+ ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+ ep->eid, skb);
+
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ ret = ath10k_skb_map(htc->ar->dev, skb);
+ if (ret)
+ goto err;
+
+ ret = ath10k_hif_send_head(htc->ar,
+ ep->ul_pipe_id,
+ ep->eid,
+ skb->len,
+ skb);
+ if (unlikely(ret))
+ goto err;
+
+ return 0;
+err:
+ ath10k_warn("HTC issue failed: %d\n", ret);
+
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ spin_unlock_bh(&htc->tx_lock);
+
+ /* this is the simplest way to handle out-of-resources for non-credit
+ * based endpoints. credit based endpoints can still get -ENOSR, but
+ * this is highly unlikely as credit reservation should prevent that */
+ if (ret == -ENOSR) {
+ spin_lock_bh(&htc->tx_lock);
+ __skb_queue_head(&ep->tx_queue, skb);
+ spin_unlock_bh(&htc->tx_lock);
+
+ return ret;
+ }
+
+ skb_cb->is_aborted = true;
+ ath10k_htc_notify_tx_completion(ep, skb);
+
+ return ret;
+}
+
+static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
+ struct ath10k_htc_ep *ep,
+ u8 *credits)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_cb *skb_cb;
+ int credits_required;
+ int remainder;
+ unsigned int transfer_len;
+
+ lockdep_assert_held(&htc->tx_lock);
+
+ skb = __skb_dequeue(&ep->tx_queue);
+ if (!skb)
+ return NULL;
+
+ skb_cb = ATH10K_SKB_CB(skb);
+ transfer_len = skb->len;
+
+ if (likely(transfer_len <= htc->target_credit_size)) {
+ credits_required = 1;
+ } else {
+ /* figure out how many credits this message requires */
+ credits_required = transfer_len / htc->target_credit_size;
+ remainder = transfer_len % htc->target_credit_size;
+
+ if (remainder)
+ credits_required++;
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
+ credits_required, ep->tx_credits);
+
+ if (ep->tx_credits < credits_required) {
+ __skb_queue_head(&ep->tx_queue, skb);
+ return NULL;
+ }
+
+ ep->tx_credits -= credits_required;
+ *credits = credits_required;
+ return skb;
+}
+
+static void ath10k_htc_send_work(struct work_struct *work)
+{
+ struct ath10k_htc_ep *ep = container_of(work,
+ struct ath10k_htc_ep, send_work);
+ struct ath10k_htc *htc = ep->htc;
+ struct sk_buff *skb;
+ u8 credits = 0;
+ int ret;
+
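+ /* drain the endpoint tx queue until it is empty or we run out of
+ * tx credits / HIF resources */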
+ while (true) {
+ if (ep->ul_is_polled)
+ ath10k_htc_send_complete_check(ep, 0);
+
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credit_flow_enabled)
+ skb = ath10k_htc_get_skb_credit_based(htc, ep,
+ &credits);
+ else
+ skb = __skb_dequeue(&ep->tx_queue);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (!skb)
+ break;
+
+ ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
+ if (ret == -ENOSR)
+ break;
+ }
+}
+
+int ath10k_htc_send(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn("Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
+
+ spin_lock_bh(&htc->tx_lock);
+ __skb_queue_tail(&ep->tx_queue, skb);
+ spin_unlock_bh(&htc->tx_lock);
+
+ queue_work(htc->ar->workqueue, &ep->send_work);
+ return 0;
+}
+
+static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb,
+ unsigned int eid)
+{
+ struct ath10k_htc *htc = ar->htc;
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ bool stopping;
+
+ ath10k_htc_notify_tx_completion(ep, skb);
+ /* the skb now belongs to the completion handler */
+
+ spin_lock_bh(&htc->tx_lock);
+ stopping = htc->stopping;
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (!ep->tx_credit_flow_enabled && !stopping)
+ /*
+ * note: when using TX credit flow, the re-checking of
+ * queues happens when credits flow back from the target.
+ * in the non-TX credit case, we recheck after the packet
+ * completes
+ */
+ queue_work(ar->workqueue, &ep->send_work);
+
+ return 0;
+}
+
+/* flush endpoint TX queue */
+static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
+ struct ath10k_htc_ep *ep)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_cb *skb_cb;
+
+ spin_lock_bh(&htc->tx_lock);
+ for (;;) {
+ skb = __skb_dequeue(&ep->tx_queue);
+ if (!skb)
+ break;
+
+ skb_cb = ATH10K_SKB_CB(skb);
+ skb_cb->is_aborted = true;
+ ath10k_htc_notify_tx_completion(ep, skb);
+ }
+ spin_unlock_bh(&htc->tx_lock);
+
+ cancel_work_sync(&ep->send_work);
+}
+
+/***********/
+/* Receive */
+/***********/
+
+static void
+ath10k_htc_process_credit_report(struct ath10k_htc *htc,
+ const struct ath10k_htc_credit_report *report,
+ int len,
+ enum ath10k_htc_ep_id eid)
+{
+ struct ath10k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+ ath10k_warn("Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH10K_HTC_EP_COUNT)
+ break;
+
+ ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
+ report->eid, report->credits);
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
+ queue_work(htc->ar->workqueue, &ep->send_work);
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath10k_htc_ep_id src_eid)
+{
+ int status = 0;
+ struct ath10k_htc_record *record;
+ u8 *orig_buffer;
+ int orig_length;
+ size_t len;
+
+ orig_buffer = buffer;
+ orig_length = length;
+
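+ /* walk the trailer one id-len-value record at a time */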
+ while (length > 0) {
+ record = (struct ath10k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath10k_warn("Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ switch (record->hdr.id) {
+ case ATH10K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath10k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath10k_warn("Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath10k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath10k_warn("Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ if (status)
+ ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
+ orig_buffer, orig_length);
+
+ return status;
+}
+
+static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb,
+ u8 pipe_id)
+{
+ int status = 0;
+ struct ath10k_htc *htc = ar->htc;
+ struct ath10k_htc_hdr *hdr;
+ struct ath10k_htc_ep *ep;
+ u16 payload_len;
+ u32 trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = hdr->eid;
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn("HTC Rx: invalid eid %d\n", eid);
+ ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
+ hdr, sizeof(*hdr));
+ status = -EINVAL;
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ /*
+ * If the endpoint that received this message from the target has
+ * a to-target HIF pipe whose send completions are polled rather
+ * than interrupt-driven, this is a good point to ask HIF to check
+ * whether it has any completed sends to handle.
+ */
+ if (ep->ul_is_polled)
+ ath10k_htc_send_complete_check(ep, 1);
+
+ payload_len = __le16_to_cpu(hdr->len);
+
+ if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
+ ath10k_warn("HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
+ hdr, sizeof(*hdr));
+ status = -EINVAL;
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
+ "", hdr, sizeof(*hdr));
+ status = -EINVAL;
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = hdr->trailer_len;
+ min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath10k_warn("Invalid trailer length: %d\n",
+ trailer_len);
+ status = -EPROTO;
+ goto out;
+ }
+
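+ /* the trailer occupies the last trailer_len bytes of the payload */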
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath10k_htc_process_trailer(htc, trailer,
+ trailer_len, hdr->eid);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (((int)payload_len - (int)trailer_len) <= 0)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ if (eid == ATH10K_HTC_EP_0) {
+ struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
+
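+ /* anything other than a suspend-complete indication is treated as
+ * a control response to a pending request */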
+ switch (__le16_to_cpu(msg->hdr.message_id)) {
+ default:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /*
+ * this is a fatal error; the target should not be
+ * sending unsolicited messages on ep 0
+ */
+ ath10k_warn("HTC rx ctrl still processing\n");
+ status = -EINVAL;
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH10K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+ htc->htc_ops.target_send_suspend_complete(ar);
+ }
+ goto out;
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ar, skb);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+
+ return status;
+}
+
+static void ath10k_htc_control_rx_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint. */
+ ath10k_warn("unexpected htc rx\n");
+ kfree_skb(skb);
+}
+
+/***************/
+/* Init/Deinit */
+/***************/
+
+static const char *htc_service_name(enum ath10k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH10K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH10K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH10K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ }
+
+ return "Unknown";
+}
+
+static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
+{
+ struct ath10k_htc_ep *ep;
+ int i;
+
+ for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ skb_queue_head_init(&ep->tx_queue);
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ INIT_WORK(&ep->send_work, ath10k_htc_send_work);
+ }
+}
+
+static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
+{
+ struct ath10k_htc_svc_tx_credits *entry;
+
+ entry = &htc->service_tx_alloc[0];
+
+ /*
+ * For PCIe, allocate all credits/HTC buffers to WMI.
+ * No buffers are used/required for data; data always
+ * remains on the host.
+ */
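+ /* skip slot 0; the WMI control service gets the whole credit pool */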
+ entry++;
+ entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+ entry->credit_allocation = htc->total_transmit_credits;
+}
+
+static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
+ u16 service_id)
+{
+ u8 allocation = 0;
+ int i;
+
+ for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
+ if (htc->service_tx_alloc[i].service_id == service_id)
+ allocation =
+ htc->service_tx_alloc[i].credit_allocation;
+ }
+
+ return allocation;
+}
+
+int ath10k_htc_wait_target(struct ath10k_htc *htc)
+{
+ int status = 0;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+ struct ath10k_htc_msg *msg;
+ u16 message_id;
+ u16 credit_count;
+ u16 credit_size;
+
+ INIT_COMPLETION(htc->ctl_resp);
+
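+ /* once HIF is up the target sends an HTC ready message on endpoint 0;
+ * the rx completion handler copies it into control_resp_buffer and
+ * completes ctl_resp */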
+ status = ath10k_hif_start(htc->ar);
+ if (status) {
+ ath10k_err("could not start HIF (%d)\n", status);
+ goto err_start;
+ }
+
+ status = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ if (status <= 0) {
+ if (status == 0)
+ status = -ETIMEDOUT;
+
+ ath10k_err("ctl_resp never came in (%d)\n", status);
+ goto err_target;
+ }
+
+ if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
+ ath10k_err("Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+
+ status = -ECOMM;
+ goto err_target;
+ }
+
+ msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+ message_id = __le16_to_cpu(msg->hdr.message_id);
+ credit_count = __le16_to_cpu(msg->ready.credit_count);
+ credit_size = __le16_to_cpu(msg->ready.credit_size);
+
+ if (message_id != ATH10K_HTC_MSG_READY_ID) {
+ ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
+ status = -ECOMM;
+ goto err_target;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d\n",
+ htc->total_transmit_credits,
+ htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ status = -ECOMM;
+ ath10k_err("Invalid credit size received\n");
+ goto err_target;
+ }
+
+ ath10k_htc_setup_target_buffer_assignments(htc);
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_err("could not connect to htc service (%d)\n", status);
+ goto err_target;
+ }
+
+ return 0;
+err_target:
+ ath10k_hif_stop(htc->ar);
+err_start:
+ return status;
+}
+
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+ struct ath10k_htc_svc_conn_req *conn_req,
+ struct ath10k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath10k_htc_msg *msg;
+ struct ath10k_htc_conn_svc *req_msg;
+ struct ath10k_htc_conn_svc_response resp_msg_dummy;
+ struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
+ enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH10K_HTC_EP_0;
+ max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath10k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath10k_warn("HTC Service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+ if (!skb) {
+ ath10k_err("Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(msg->hdr) + sizeof(msg->connect_service);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ msg = (struct ath10k_htc_msg *)skb->data;
+ msg->hdr.message_id =
+ __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
+
+ flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
+ flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ /* flags must be complete before being copied into the request */
+ req_msg = &msg->connect_service;
+ req_msg->flags = __cpu_to_le16(flags);
+ req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+ INIT_COMPLETION(htc->ctl_resp);
+
+ status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ status = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (status <= 0) {
+ if (status == 0)
+ status = -ETIMEDOUT;
+ ath10k_err("Service connect timeout: %d\n", status);
+ return status;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+ resp_msg = &msg->connect_service_response;
+ message_id = __le16_to_cpu(msg->hdr.message_id);
+ service_id = __le16_to_cpu(resp_msg->service_id);
+
+ if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(msg->hdr) +
+ sizeof(msg->connect_service_response))) {
+ ath10k_err("Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
+ htc_service_name(service_id),
+ resp_msg->status, resp_msg->eid);
+
+ conn_resp->connect_resp_code = resp_msg->status;
+
+ /* check response status */
+ if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath10k_err("HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ resp_msg->status);
+ return -EPROTO;
+ }
+
+ assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
+ max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
+
+setup:
+
+ if (assigned_eid >= ATH10K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
+ ep->tx_credits = tx_alloc;
+ ep->tx_credit_size = htc->target_credit_size;
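+ /* worst-case number of credits a single max-sized message can take;
+ * rounded up below if it does not divide evenly */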
+ ep->tx_credits_per_max_message = ep->max_ep_message_len /
+ htc->target_credit_size;
+
+ if (ep->max_ep_message_len % htc->target_credit_size)
+ ep->tx_credits_per_max_message++;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath10k_hif_map_service_to_pipe(htc->ar,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id,
+ &ep->ul_is_polled,
+ &ep->dl_is_polled);
+ if (status)
+ return status;
+
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "EP %d UL polled: %d, DL polled: %d\n",
+ ep->eid, ep->ul_is_polled, ep->dl_is_polled);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "HTC service: %s eid: %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+struct sk_buff *ath10k_htc_alloc_skb(int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
+ if (!skb) {
+ ath10k_warn("could not allocate HTC tx skb\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn("Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+int ath10k_htc_start(struct ath10k_htc *htc)
+{
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath10k_htc_msg *msg;
+
+ skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath10k_htc_msg *)skb->data;
+ msg->hdr.message_id =
+ __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+/*
+ * stop HTC communications, i.e. stop interrupt reception, and flush all
+ * queued buffers
+ */
+void ath10k_htc_stop(struct ath10k_htc *htc)
+{
+ int i;
+ struct ath10k_htc_ep *ep;
+
+ spin_lock_bh(&htc->tx_lock);
+ htc->stopping = true;
+ spin_unlock_bh(&htc->tx_lock);
+
+ for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ath10k_htc_flush_endpoint_tx(htc, ep);
+ }
+
+ ath10k_hif_stop(htc->ar);
+ ath10k_htc_reset_endpoint_states(htc);
+}
+
+/* registered target arrival callback from the HIF layer */
+struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
+ struct ath10k_htc_ops *htc_ops)
+{
+ struct ath10k_hif_cb htc_callbacks;
+ struct ath10k_htc_ep *ep = NULL;
+ struct ath10k_htc *htc = NULL;
+
+ /* FIXME: use struct ath10k instead */
+ htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
+ if (!htc)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&htc->tx_lock);
+
+ memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
+
+ ath10k_htc_reset_endpoint_states(htc);
+
+ /* setup HIF layer callbacks */
+ htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
+ htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
+ htc->ar = ar;
+
+ /* Get HIF default pipe for HTC message exchange */
+ ep = &htc->endpoint[ATH10K_HTC_EP_0];
+
+ ath10k_hif_init(ar, &htc_callbacks);
+ ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
+
+ init_completion(&htc->ctl_resp);
+
+ return htc;
+}
+
+void ath10k_htc_destroy(struct ath10k_htc *htc)
+{
+ kfree(htc);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
new file mode 100644
index 000000000000..fa45844b59fb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTC_H_
+#define _HTC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+
+struct ath10k;
+
+/****************/
+/* HTC protocol */
+/****************/
+
+/*
+ * HTC - host-target control protocol
+ *
+ * tx packets are generally <htc_hdr><payload>
+ * rx packets are more complex: <htc_hdr><payload><trailer>
+ *
+ * The payload + trailer length is stored in len.
+ * To get the payload-only length compute len - trailer_len.
+ *
+ * Trailer contains (possibly) multiple <htc_record>.
+ * Each record is a id-len-value.
+ *
+ * HTC header flags, control_byte0, control_byte1
+ * have different meanings depending on whether the
+ * packet is tx or rx.
+ *
+ * Alignment: htc_hdr, payload and trailer are
+ * 4-byte aligned.
+ */
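+
+/*
+ * Worked example (hypothetical numbers, for illustration only): a
+ * 100-byte rx frame with hdr.len = 92 and hdr.trailer_len = 12 breaks
+ * down into 8 bytes of htc_hdr, 80 bytes of payload and a 12-byte
+ * trailer; the payload-only length is len - trailer_len = 80.
+ */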
+
+enum ath10k_htc_tx_flags {
+ ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath10k_htc_rx_flags {
+ ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
+struct ath10k_htc_hdr {
+ u8 eid; /* @enum ath10k_htc_ep_id */
+ u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
+ __le16 len;
+ union {
+ u8 trailer_len; /* for rx */
+ u8 control_byte0;
+ } __packed;
+ union {
+ u8 seq_no; /* for tx */
+ u8 control_byte1;
+ } __packed;
+ u8 pad0;
+ u8 pad1;
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_msg_id {
+ ATH10K_HTC_MSG_READY_ID = 1,
+ ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
+};
+
+enum ath10k_htc_version {
+ ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+enum ath10k_htc_conn_flags {
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
+#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
+ ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
+ ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
+};
+
+enum ath10k_htc_conn_svc_status {
+ ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+struct ath10k_ath10k_htc_msg_hdr {
+ __le16 message_id; /* @enum htc_message_id */
+} __packed;
+
+struct ath10k_htc_unknown {
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_ready {
+ __le16 credit_count;
+ __le16 credit_size;
+ u8 max_endpoints;
+ u8 pad0;
+} __packed;
+
+struct ath10k_htc_ready_extended {
+ struct ath10k_htc_ready base;
+ u8 htc_version; /* @enum ath10k_htc_version */
+ u8 max_msgs_per_htc_bundle;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc {
+ __le16 service_id;
+ __le16 flags; /* @enum ath10k_htc_conn_flags */
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc_response {
+ __le16 service_id;
+ u8 status; /* @enum ath10k_htc_conn_svc_status */
+ u8 eid;
+ __le16 max_msg_size;
+} __packed;
+
+struct ath10k_htc_setup_complete_extended {
+ u8 pad0;
+ u8 pad1;
+ __le32 flags; /* @enum htc_setup_complete_flags */
+ u8 max_msgs_per_bundled_recv;
+ u8 pad2;
+ u8 pad3;
+ u8 pad4;
+} __packed;
+
+struct ath10k_htc_msg {
+ struct ath10k_ath10k_htc_msg_hdr hdr;
+ union {
+ /* host-to-target */
+ struct ath10k_htc_conn_svc connect_service;
+ struct ath10k_htc_ready ready;
+ struct ath10k_htc_ready_extended ready_ext;
+ struct ath10k_htc_unknown unknown;
+ struct ath10k_htc_setup_complete_extended setup_complete_ext;
+
+ /* target-to-host */
+ struct ath10k_htc_conn_svc_response connect_service_response;
+ };
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_record_id {
+ ATH10K_HTC_RECORD_NULL = 0,
+ ATH10K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath10k_ath10k_htc_record_hdr {
+ u8 id; /* @enum ath10k_ath10k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_credit_report {
+ u8 eid; /* @enum ath10k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_record {
+ struct ath10k_ath10k_htc_record_hdr hdr;
+ union {
+ struct ath10k_htc_credit_report credit_report[0];
+ u8 payload[0];
+ };
+} __packed __aligned(4);
+
+/*
+ * note: the trailer offset is dynamic depending
+ * on payload length. this is only a struct layout draft
+ */
+struct ath10k_htc_frame {
+ struct ath10k_htc_hdr hdr;
+ union {
+ struct ath10k_htc_msg msg;
+ u8 payload[0];
+ };
+ struct ath10k_htc_record trailer[0];
+} __packed __aligned(4);
+
+
+/*******************/
+/* Host-side stuff */
+/*******************/
+
+enum ath10k_htc_svc_gid {
+ ATH10K_HTC_SVC_GRP_RSVD = 0,
+ ATH10K_HTC_SVC_GRP_WMI = 1,
+ ATH10K_HTC_SVC_GRP_NMI = 2,
+ ATH10K_HTC_SVC_GRP_HTT = 3,
+
+ ATH10K_HTC_SVC_GRP_TEST = 254,
+ ATH10K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath10k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH10K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED,
+
+ ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
+ ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
+ ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
+ ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
+ ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
+ ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
+
+ ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
+ ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
+
+ ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
+
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
+};
+
+#undef SVC
+
+enum ath10k_htc_ep_id {
+ ATH10K_HTC_EP_UNUSED = -1,
+ ATH10K_HTC_EP_0 = 0,
+ ATH10K_HTC_EP_1 = 1,
+ ATH10K_HTC_EP_2,
+ ATH10K_HTC_EP_3,
+ ATH10K_HTC_EP_4,
+ ATH10K_HTC_EP_5,
+ ATH10K_HTC_EP_6,
+ ATH10K_HTC_EP_7,
+ ATH10K_HTC_EP_8,
+ ATH10K_HTC_EP_COUNT,
+};
+
+struct ath10k_htc_ops {
+ void (*target_send_suspend_complete)(struct ath10k *ar);
+};
+
+struct ath10k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+};
+
+/* service connection information */
+struct ath10k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath10k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath10k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath10k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH10K_HTC_MAX_LEN 4096
+#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath10k_htc_hdr))
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+
+struct ath10k_htc_ep {
+ struct ath10k_htc *htc;
+ enum ath10k_htc_ep_id eid;
+ enum ath10k_htc_svc_id service_id;
+ struct ath10k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+ int ul_is_polled; /* call HIF to get tx completions */
+ int dl_is_polled; /* call HIF to fetch rx (not implemented) */
+
+ struct sk_buff_head tx_queue;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ int tx_credit_size;
+ int tx_credits_per_max_message;
+ bool tx_credit_flow_enabled;
+
+ struct work_struct send_work;
+};
+
+struct ath10k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath10k_htc {
+ struct ath10k *ar;
+ struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
+
+ /* protects endpoint and stopping fields */
+ spinlock_t tx_lock;
+
+ struct ath10k_htc_ops htc_ops;
+
+ u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
+ int target_credit_size;
+
+ bool stopping;
+};
+
+struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
+ struct ath10k_htc_ops *htc_ops);
+int ath10k_htc_wait_target(struct ath10k_htc *htc);
+int ath10k_htc_start(struct ath10k_htc *htc);
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+ struct ath10k_htc_svc_conn_req *conn_req,
+ struct ath10k_htc_svc_conn_resp *conn_resp);
+int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+ struct sk_buff *packet);
+void ath10k_htc_stop(struct ath10k_htc *htc);
+void ath10k_htc_destroy(struct ath10k_htc *htc);
+struct sk_buff *ath10k_htc_alloc_skb(int size);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
new file mode 100644
index 000000000000..185a5468a2f2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "htt.h"
+#include "core.h"
+#include "debug.h"
+
+static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
+{
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ htt->eid = conn_resp.eid;
+
+ return 0;
+}
+
+struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
+{
+ struct ath10k_htt *htt;
+ int ret;
+
+ htt = kzalloc(sizeof(*htt), GFP_KERNEL);
+ if (!htt)
+ return NULL;
+
+ htt->ar = ar;
+ htt->max_throughput_mbps = 800;
+
+ /*
+ * Connect to HTC service.
+ * This has to be done before calling ath10k_htt_rx_attach,
+ * since ath10k_htt_rx_attach involves sending a rx ring configure
+ * message to the target.
+ */
+ if (ath10k_htt_htc_attach(htt))
+ goto err_htc_attach;
+
+ ret = ath10k_htt_tx_attach(htt);
+ if (ret) {
+ ath10k_err("could not attach htt tx (%d)\n", ret);
+ goto err_htc_attach;
+ }
+
+ if (ath10k_htt_rx_attach(htt))
+ goto err_rx_attach;
+
+ /*
+ * Prefetch enough data to satisfy target
+ * classification engine.
+ * This is for LL chips. HL chips will probably
+ * transfer the whole frame in the tx fragment.
+ */
+ htt->prefetch_len =
+ 36 + /* 802.11 + qos + ht */
+ 4 + /* 802.1q */
+ 8 + /* llc snap */
+ 2; /* ip4 dscp or ip6 priority */
+
+ return htt;
+
+err_rx_attach:
+ ath10k_htt_tx_detach(htt);
+err_htc_attach:
+ kfree(htt);
+ return NULL;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+
+static int ath10k_htt_verify_version(struct ath10k_htt *htt)
+{
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt target version %d.%d; host version %d.%d\n",
+ htt->target_version_major,
+ htt->target_version_minor,
+ HTT_CURRENT_VERSION_MAJOR,
+ HTT_CURRENT_VERSION_MINOR);
+
+ if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
+ ath10k_err("htt major versions are incompatible!\n");
+ return -ENOTSUPP;
+ }
+
+ if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
+ ath10k_warn("htt minor version differ but still compatible\n");
+
+ return 0;
+}
+
+int ath10k_htt_attach_target(struct ath10k_htt *htt)
+{
+ int status;
+
+ init_completion(&htt->target_version_received);
+
+ status = ath10k_htt_h2t_ver_req_msg(htt);
+ if (status)
+ return status;
+
+ status = wait_for_completion_timeout(&htt->target_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (status <= 0) {
+ ath10k_warn("htt version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ status = ath10k_htt_verify_version(htt);
+ if (status)
+ return status;
+
+ return ath10k_htt_send_rx_ring_cfg_ll(htt);
+}
+
+void ath10k_htt_detach(struct ath10k_htt *htt)
+{
+ ath10k_htt_rx_detach(htt);
+ ath10k_htt_tx_detach(htt);
+ kfree(htt);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
new file mode 100644
index 000000000000..a7a7aa040536
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -0,0 +1,1338 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTT_H_
+#define _HTT_H_
+
+#include <linux/bug.h>
+
+#include "core.h"
+#include "htc.h"
+#include "rx_desc.h"
+
+#define HTT_CURRENT_VERSION_MAJOR 2
+#define HTT_CURRENT_VERSION_MINOR 1
+
+enum htt_dbg_stats_type {
+ HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
+ HTT_DBG_STATS_RX_REORDER = 1 << 1,
+ HTT_DBG_STATS_RX_RATE_INFO = 1 << 2,
+ HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3,
+ HTT_DBG_STATS_TX_RATE_INFO = 1 << 4,
+ /* bits 5-23 currently reserved */
+
+ HTT_DBG_NUM_STATS /* keep this last */
+};
+
+enum htt_h2t_msg_type { /* host-to-target */
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_TX_FRM = 1,
+ HTT_H2T_MSG_TYPE_RX_RING_CFG = 2,
+ HTT_H2T_MSG_TYPE_STATS_REQ = 3,
+ HTT_H2T_MSG_TYPE_SYNC = 4,
+ HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
+ HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+ HTT_H2T_MSG_TYPE_MGMT_TX = 7,
+
+ HTT_H2T_NUM_MSGS /* keep this last */
+};
+
+struct htt_cmd_hdr {
+ u8 msg_type;
+} __packed;
+
+struct htt_ver_req {
+ u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+} __packed;
+
+/*
+ * HTT tx MSDU descriptor
+ *
+ * The HTT tx MSDU descriptor is created by the host HTT SW for each
+ * tx MSDU. The HTT tx MSDU descriptor contains the information that
+ * the target firmware needs for the FW's tx processing, particularly
+ * for creating the HW msdu descriptor.
+ * The same HTT tx descriptor is used for HL and LL systems, though
+ * a few fields within the tx descriptor are used only by LL or
+ * only by HL.
+ * The HTT tx descriptor is defined in two manners: by a struct with
+ * bitfields, and by a series of [dword offset, bit mask, bit shift]
+ * definitions.
+ * The target should use the struct def, for simplicity and clarity,
+ * but the host shall use the bit-mask + bit-shift defs, to be endian-
+ * neutral. Specifically, the host shall use the get/set macros built
+ * around the mask + shift defs.
+ */
+struct htt_data_tx_desc_frag {
+ __le32 paddr;
+ __le32 len;
+} __packed;
+
+enum htt_data_tx_desc_flags0 {
+ HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
+ HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
+ HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2,
+ HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3,
+ HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
+};
+
+enum htt_data_tx_desc_flags1 {
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6
+ HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11,
+ HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
+ HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
+ HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
+ HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15
+};
+
+enum htt_data_tx_ext_tid {
+ HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
+ HTT_DATA_TX_EXT_TID_MGMT = 17,
+ HTT_DATA_TX_EXT_TID_INVALID = 31
+};
+
+#define HTT_INVALID_PEERID 0xFFFF
+
+/*
+ * htt_data_tx_desc - used for data tx path
+ *
+ * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
+ * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
+ * for special kinds of tids
+ * postponed: only for HL hosts. indicates if this is a resend
+ * (HL hosts manage queues on the host)
+ * more_in_batch: only for HL hosts. indicates if more packets are
+ * pending. this allows target to wait and aggregate
+ */
+struct htt_data_tx_desc {
+ u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+ __le16 len;
+ __le16 id;
+ __le32 frags_paddr;
+ __le32 peerid;
+ u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
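+
+/*
+ * Editorial sketch, not part of the on-wire definition: per the note
+ * above, the host composes flags0/flags1 with the mask/LSB defines
+ * rather than C bitfields. Assuming the SM() helper from hw.h:
+ *
+ * desc->flags0 = HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT |
+ *                SM(pkt_type, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ * desc->flags1 = __cpu_to_le16(SM(vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID) |
+ *                              SM(ext_tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID));
+ */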
+
+enum htt_rx_ring_flags {
+ HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
+ HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
+ HTT_RX_RING_FLAGS_PPDU_START = 1 << 2,
+ HTT_RX_RING_FLAGS_PPDU_END = 1 << 3,
+ HTT_RX_RING_FLAGS_MPDU_START = 1 << 4,
+ HTT_RX_RING_FLAGS_MPDU_END = 1 << 5,
+ HTT_RX_RING_FLAGS_MSDU_START = 1 << 6,
+ HTT_RX_RING_FLAGS_MSDU_END = 1 << 7,
+ HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
+ HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9,
+ HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10,
+ HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
+ HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12,
+ HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13,
+ HTT_RX_RING_FLAGS_NULL_RX = 1 << 14,
+ HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
+};
+
+struct htt_rx_ring_setup_ring {
+ __le32 fw_idx_shadow_reg_paddr;
+ __le32 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ /* the following offsets are in 4-byte units */
+ __le16 mac80211_hdr_offset;
+ __le16 msdu_payload_offset;
+ __le16 ppdu_start_offset;
+ __le16 ppdu_end_offset;
+ __le16 mpdu_start_offset;
+ __le16 mpdu_end_offset;
+ __le16 msdu_start_offset;
+ __le16 msdu_end_offset;
+ __le16 rx_attention_offset;
+ __le16 frag_info_offset;
+} __packed;
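+
+/*
+ * Editorial sketch: the per-field offsets above are expected to be
+ * derived from the struct htt_rx_desc layout defined later in this
+ * file, expressed in 4-byte words, e.g.:
+ *
+ * ring->mpdu_start_offset =
+ *         __cpu_to_le16(offsetof(struct htt_rx_desc, mpdu_start) / 4);
+ * ring->msdu_payload_offset =
+ *         __cpu_to_le16(offsetof(struct htt_rx_desc, msdu_payload) / 4);
+ */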
+
+struct htt_rx_ring_setup_hdr {
+ u8 num_rings; /* supported values: 1, 2 */
+ __le16 rsvd0;
+} __packed;
+
+struct htt_rx_ring_setup {
+ struct htt_rx_ring_setup_hdr hdr;
+ struct htt_rx_ring_setup_ring rings[0];
+} __packed;
+
+/*
+ * htt_stats_req - request target to send specified statistics
+ *
+ * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
+ * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit
+ * field, so make sure it is little-endian.
+ * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit
+ * field, so make sure it is little-endian.
+ * @cfg_val: stat_type specific configuration
+ * @stat_type: see %htt_dbg_stats_type
+ * @cookie_lsb: used for confirmation message from target->host
+ * @cookie_msb: ditto as %cookie
+ */
+struct htt_stats_req {
+ u8 upload_types[3];
+ u8 rsvd0;
+ u8 reset_types[3];
+ struct {
+ u8 mpdu_bytes;
+ u8 mpdu_num_msdus;
+ u8 msdu_bytes;
+ } __packed;
+ u8 stat_type;
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+} __packed;
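+
+/*
+ * Editorial sketch: upload_types/reset_types are 24-bit little-endian
+ * bitmaps of %htt_dbg_stats_type values stored as three bytes, so a
+ * host-side fill could look like (mask here is an illustrative u32):
+ *
+ * req->upload_types[0] = (mask >> 0) & 0xff;
+ * req->upload_types[1] = (mask >> 8) & 0xff;
+ * req->upload_types[2] = (mask >> 16) & 0xff;
+ */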
+
+#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
+
+/*
+ * htt_oob_sync_req - request out-of-band sync
+ *
+ * The HTT SYNC tells the target to suspend processing of subsequent
+ * HTT host-to-target messages until some other target agent locally
+ * informs the target HTT FW that the current sync counter is equal to
+ * or greater than (in a modulo sense) the sync counter specified in
+ * the SYNC message.
+ *
+ * This allows other host-target components to synchronize their operation
+ * with HTT, e.g. to ensure that tx frames don't get transmitted until a
+ * security key has been downloaded to and activated by the target.
+ * In the absence of any explicit synchronization counter value
+ * specification, the target HTT FW will use zero as the default current
+ * sync value.
+ *
+ * The HTT target FW will suspend its host->target message processing as long
+ * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
+ */
+struct htt_oob_sync_req {
+ u8 sync_count;
+ __le16 rsvd0;
+} __packed;
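+
+/*
+ * Worked example of the window test above: with an out-of-band counter
+ * of 250 and an in-band sync_count of 10, (10 - 250) & 0xff == 16,
+ * which lies in (0, 128), so the target keeps host->target processing
+ * suspended until the out-of-band counter catches up.
+ */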
+
+#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F
+#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB 0
+
+struct htt_aggr_conf {
+ u8 max_num_ampdu_subframes;
+ union {
+ /* don't use bitfields; undefined behaviour */
+ u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */
+ u8 max_num_amsdu_subframes:5;
+ } __packed;
+} __packed;
+
+#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+
+struct htt_mgmt_tx_desc {
+ u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+ __le32 msdu_paddr;
+ __le32 desc_id;
+ __le32 len;
+ __le32 vdev_id;
+ u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+} __packed;
+
+enum htt_mgmt_tx_status {
+ HTT_MGMT_TX_STATUS_OK = 0,
+ HTT_MGMT_TX_STATUS_RETRY = 1,
+ HTT_MGMT_TX_STATUS_DROP = 2
+};
+
+/*=== target -> host messages ===============================================*/
+
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
+ HTT_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_T2H_NUM_MSGS
+};
+
+/*
+ * htt_resp_hdr - header for target-to-host messages
+ *
+ * msg_type: see htt_t2h_msg_type
+ */
+struct htt_resp_hdr {
+ u8 msg_type;
+} __packed;
+
+#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
+#define HTT_RESP_HDR_MSG_TYPE_MASK 0xff
+#define HTT_RESP_HDR_MSG_TYPE_LSB 0
+
+/* htt_ver_resp - response sent for htt_ver_req */
+struct htt_ver_resp {
+ u8 minor;
+ u8 major;
+ u8 rsvd0;
+} __packed;
+
+struct htt_mgmt_tx_completion {
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+ __le32 desc_id;
+ __le32 status;
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK 0x00000FC0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB 6
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB 12
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK 0x00FC0000
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB 18
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24
+
+struct htt_rx_indication_hdr {
+ u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
+ __le16 peer_id;
+ __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID (1 << 0)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB (1)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK (1 << 5)
+#define HTT_RX_INDICATION_INFO0_END_VALID (1 << 6)
+#define HTT_RX_INDICATION_INFO0_START_VALID (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB 0
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB 24
+
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB 0
+#define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24
+
+enum htt_rx_legacy_rate {
+ HTT_RX_OFDM_48 = 0,
+ HTT_RX_OFDM_24 = 1,
+ HTT_RX_OFDM_12,
+ HTT_RX_OFDM_6,
+ HTT_RX_OFDM_54,
+ HTT_RX_OFDM_36,
+ HTT_RX_OFDM_18,
+ HTT_RX_OFDM_9,
+
+ /* long preamble */
+ HTT_RX_CCK_11_LP = 0,
+ HTT_RX_CCK_5_5_LP = 1,
+ HTT_RX_CCK_2_LP,
+ HTT_RX_CCK_1_LP,
+ /* short preamble */
+ HTT_RX_CCK_11_SP,
+ HTT_RX_CCK_5_5_SP,
+ HTT_RX_CCK_2_SP
+};
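+
+/*
+ * Note: the OFDM and CCK values above intentionally overlap; which
+ * table applies is signalled by the
+ * %HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK bit in the ppdu info0 word
+ * (see %htt_rx_legacy_rate_type below).
+ */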
+
+enum htt_rx_legacy_rate_type {
+ HTT_RX_LEGACY_RATE_OFDM = 0,
+ HTT_RX_LEGACY_RATE_CCK
+};
+
+enum htt_rx_preamble_type {
+ HTT_RX_LEGACY = 0x4,
+ HTT_RX_HT = 0x8,
+ HTT_RX_HT_WITH_TXBF = 0x9,
+ HTT_RX_VHT = 0xC,
+ HTT_RX_VHT_WITH_TXBF = 0xD,
+};
+
+/*
+ * Fields: phy_err_valid, phy_err_code, tsf,
+ * usec_timestamp, sub_usec_timestamp
+ * ..are valid only if end_valid == 1.
+ *
+ * Fields: rssi_chains, legacy_rate_type,
+ * legacy_rate_cck, preamble_type, service,
+ * vht_sig_*
+ * ..are valid only if start_valid == 1;
+ */
+struct htt_rx_indication_ppdu {
+ u8 combined_rssi;
+ u8 sub_usec_timestamp;
+ u8 phy_err_code;
+ u8 info0; /* HTT_RX_INDICATION_INFO0_ */
+ struct {
+ u8 pri20_db;
+ u8 ext20_db;
+ u8 ext40_db;
+ u8 ext80_db;
+ } __packed rssi_chains[4];
+ __le32 tsf;
+ __le32 usec_timestamp;
+ __le32 info1; /* HTT_RX_INDICATION_INFO1_ */
+ __le32 info2; /* HTT_RX_INDICATION_INFO2_ */
+} __packed;
+
+enum htt_rx_mpdu_status {
+ HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
+ HTT_RX_IND_MPDU_STATUS_OK,
+ HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+ HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+ HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+ HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+ /* only accept EAPOL frames */
+ HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
+ HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
+ /* Non-data in promiscuous mode */
+ HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+ HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+ HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
+ HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
+ HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
+ HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
+
+ /*
+ * MISC: discard for unspecified reasons.
+ * Leave this enum value last.
+ */
+ HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
+};
+
+struct htt_rx_indication_mpdu_range {
+ u8 mpdu_count;
+ u8 mpdu_range_status; /* %htt_rx_mpdu_status */
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct htt_rx_indication_prefix {
+ __le16 fw_rx_desc_bytes;
+ u8 pad0;
+ u8 pad1;
+};
+
+struct htt_rx_indication {
+ struct htt_rx_indication_hdr hdr;
+ struct htt_rx_indication_ppdu ppdu;
+ struct htt_rx_indication_prefix prefix;
+
+ /*
+ * the following fields are both dynamically sized, so
+ * take care addressing them
+ */
+
+ /* the size of this is %fw_rx_desc_bytes */
+ struct fw_rx_desc_base fw_desc;
+
+ /*
+ * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
+ * and has %num_mpdu_ranges elements.
+ */
+ struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+} __packed;
+
+static inline struct htt_rx_indication_mpdu_range *
+ htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
+{
+ void *ptr = rx_ind;
+
+ ptr += sizeof(rx_ind->hdr)
+ + sizeof(rx_ind->ppdu)
+ + sizeof(rx_ind->prefix)
+ + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
+ return ptr;
+}
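+
+/*
+ * Usage sketch (assuming the MS() helper from hw.h): the number of
+ * elements in the array returned above comes from the indication
+ * header, e.g.:
+ *
+ * num_ranges = MS(__le32_to_cpu(rx_ind->hdr.info1),
+ *                 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+ */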
+
+enum htt_rx_flush_mpdu_status {
+ HTT_RX_FLUSH_MPDU_DISCARD = 0,
+ HTT_RX_FLUSH_MPDU_REORDER = 1,
+};
+
+/*
+ * htt_rx_flush - discard or reorder given range of mpdus
+ *
+ * Note: host must check if all sequence numbers between
+ * [seq_num_start, seq_num_end-1] are valid.
+ */
+struct htt_rx_flush {
+ __le16 peer_id;
+ u8 tid;
+ u8 rsvd0;
+ u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
+ u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
+ u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
+};
+
+struct htt_rx_peer_map {
+ u8 vdev_id;
+ __le16 peer_id;
+ u8 addr[6];
+ u8 rsvd0;
+ u8 rsvd1;
+} __packed;
+
+struct htt_rx_peer_unmap {
+ u8 rsvd0;
+ __le16 peer_id;
+} __packed;
+
+enum htt_security_types {
+ HTT_SECURITY_NONE,
+ HTT_SECURITY_WEP128,
+ HTT_SECURITY_WEP104,
+ HTT_SECURITY_WEP40,
+ HTT_SECURITY_TKIP,
+ HTT_SECURITY_TKIP_NOMIC,
+ HTT_SECURITY_AES_CCMP,
+ HTT_SECURITY_WAPI,
+
+ HTT_NUM_SECURITY_TYPES /* keep this last! */
+};
+
+enum htt_security_flags {
+#define HTT_SECURITY_TYPE_MASK 0x7F
+#define HTT_SECURITY_TYPE_LSB 0
+ HTT_SECURITY_IS_UNICAST = 1 << 7
+};
+
+struct htt_security_indication {
+ union {
+ /* don't use bitfields; undefined behaviour */
+ u8 flags; /* %htt_security_flags */
+ struct {
+ u8 security_type:7, /* %htt_security_types */
+ is_unicast:1;
+ } __packed;
+ } __packed;
+ __le16 peer_id;
+ u8 michael_key[8];
+ u8 wapi_rsc[16];
+} __packed;
+
+#define HTT_RX_BA_INFO0_TID_MASK 0x000F
+#define HTT_RX_BA_INFO0_TID_LSB 0
+#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
+#define HTT_RX_BA_INFO0_PEER_ID_LSB 4
+
+struct htt_rx_addba {
+ u8 window_size;
+ __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+struct htt_rx_delba {
+ u8 rsvd0;
+ __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+enum htt_data_tx_status {
+ HTT_DATA_TX_STATUS_OK = 0,
+ HTT_DATA_TX_STATUS_DISCARD = 1,
+ HTT_DATA_TX_STATUS_NO_ACK = 2,
+ HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */
+ HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
+};
+
+enum htt_data_tx_flags {
+#define HTT_DATA_TX_STATUS_MASK 0x07
+#define HTT_DATA_TX_STATUS_LSB 0
+#define HTT_DATA_TX_TID_MASK 0x78
+#define HTT_DATA_TX_TID_LSB 3
+ HTT_DATA_TX_TID_INVALID = 1 << 7
+};
+
+#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
+
+struct htt_data_tx_completion {
+ union {
+ u8 flags;
+ struct {
+ u8 status:3,
+ tid:4,
+ tid_invalid:1;
+ } __packed;
+ } __packed;
+ u8 num_msdus;
+ u8 rsvd0;
+ __le16 msdus[0]; /* variable length based on %num_msdus */
+} __packed;
+
+struct htt_tx_compl_ind_base {
+ u32 hdr;
+ u16 payload[1/*or more*/];
+} __packed;
+
+struct htt_rc_tx_done_params {
+ u32 rate_code;
+ u32 rate_code_flags;
+ u32 flags;
+ u32 num_enqued; /* 1 for non-AMPDU */
+ u32 num_retries;
+ u32 num_failed; /* for AMPDU */
+ u32 ack_rssi;
+ u32 time_stamp;
+ u32 is_probe;
+};
+
+struct htt_rc_update {
+ u8 vdev_id;
+ __le16 peer_id;
+ u8 addr[6];
+ u8 num_elems;
+ u8 rsvd0;
+ struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
+} __packed;
+
+/* see htt_rx_indication for similar fields and descriptions */
+struct htt_rx_fragment_indication {
+ union {
+ u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
+ struct {
+ u8 ext_tid:5,
+ flush_valid:1;
+ } __packed;
+ } __packed;
+ __le16 peer_id;
+ __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
+ __le16 fw_rx_desc_bytes;
+ __le16 rsvd0;
+
+ u8 fw_msdu_rx_desc[0];
+} __packed;
+
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5
+
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
+
+/*
+ * target -> host test message definition
+ *
+ * The following field definitions describe the format of the test
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit integer values, followed by a variable number
+ * of 8-bit character values.
+ *
+ * |31 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | num chars | num ints | msg type |
+ * |-----------------------------------------------------------|
+ * | int 0 |
+ * |-----------------------------------------------------------|
+ * | int 1 |
+ * |-----------------------------------------------------------|
+ * | ... |
+ * |-----------------------------------------------------------|
+ * | char 3 | char 2 | char 1 | char 0 |
+ * |-----------------------------------------------------------|
+ * | | | ... | char 4 |
+ * |-----------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a test message
+ * Value: HTT_MSG_TYPE_TEST
+ * - NUM_INTS
+ * Bits 15:8
+ * Purpose: indicate how many 32-bit integers follow the message header
+ * - NUM_CHARS
+ * Bits 31:16
+ * Purpose: indicate how many 8-bit characters follow the series of integers
+ */
+struct htt_rx_test {
+ u8 num_ints;
+ __le16 num_chars;
+
+ /* payload consists of 2 lists:
+ * a) num_ints * sizeof(__le32)
+ * b) num_chars * sizeof(u8) aligned to 4bytes */
+ u8 payload[0];
+} __packed;
+
+static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
+{
+ return (__le32 *)rx_test->payload;
+}
+
+static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
+{
+ return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
+}
+
+/*
+ * target -> host packet log message
+ *
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit payload values.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | | | | msg type |
+ * |-----------------------------------------------------------|
+ * | payload |
+ * |-----------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a packet log message
+ * Value: HTT_MSG_TYPE_PACKETLOG
+ */
+struct htt_pktlog_msg {
+ u8 pad[3];
+ __le32 payload[1 /* or more */];
+} __packed;
+
+struct htt_dbg_stats_rx_reorder_stats {
+ /* Non QoS MPDUs received */
+ __le32 deliver_non_qos;
+
+ /* MPDUs received in-order */
+ __le32 deliver_in_order;
+
+ /* Flush due to reorder timer expired */
+ __le32 deliver_flush_timeout;
+
+ /* Flush due to move out of window */
+ __le32 deliver_flush_oow;
+
+ /* Flush due to DELBA */
+ __le32 deliver_flush_delba;
+
+ /* MPDUs dropped due to FCS error */
+ __le32 fcs_error;
+
+ /* MPDUs dropped due to monitor mode non-data packet */
+ __le32 mgmt_ctrl;
+
+ /* MPDUs dropped due to invalid peer */
+ __le32 invalid_peer;
+
+ /* MPDUs dropped due to duplication (non aggregation) */
+ __le32 dup_non_aggr;
+
+ /* MPDUs dropped because they were processed earlier */
+ __le32 dup_past;
+
+ /* MPDUs dropped due to duplicate in reorder queue */
+ __le32 dup_in_reorder;
+
+ /* Reorder timeout happened */
+ __le32 reorder_timeout;
+
+ /* invalid bar ssn */
+ __le32 invalid_bar_ssn;
+
+ /* reorder reset due to bar ssn */
+ __le32 ssn_reset;
+};
+
+struct htt_dbg_stats_wal_tx_stats {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requed by SW */
+ __le32 mpdus_requed;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev tx timeout */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+} __packed;
+
+struct htt_dbg_stats_wal_rx_stats {
+ /* Counts any change in ring routing mid-ppdu */
+ __le32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ __le32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ __le32 r0_frags;
+ __le32 r1_frags;
+ __le32 r2_frags;
+ __le32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ __le32 htt_msdus;
+ __le32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ __le32 loc_msdus;
+ __le32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ __le32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ __le32 phy_errs;
+
+ /* Number of PHY errors drops */
+ __le32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ __le32 mpdu_errs;
+} __packed;
+
+struct htt_dbg_stats_wal_peer_stats {
+ __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+} __packed;
+
+struct htt_dbg_stats_wal_pdev_txrx {
+ struct htt_dbg_stats_wal_tx_stats tx_stats;
+ struct htt_dbg_stats_wal_rx_stats rx_stats;
+ struct htt_dbg_stats_wal_peer_stats peer_stats;
+} __packed;
+
+struct htt_dbg_stats_rx_rate_info {
+ __le32 mcs[10];
+ __le32 sgi[10];
+ __le32 nss[4];
+ __le32 stbc[10];
+ __le32 bw[3];
+ __le32 pream[6];
+ __le32 ldpc;
+ __le32 txbf;
+};
+
+/*
+ * htt_dbg_stats_status -
+ * present - The requested stats have been delivered in full.
+ * This indicates that either the stats information was contained
+ * in its entirety within this message, or else this message
+ * completes the delivery of the requested stats info that was
+ * partially delivered through earlier STATS_CONF messages.
+ * partial - The requested stats have been delivered in part.
+ * One or more subsequent STATS_CONF messages with the same
+ * cookie value will be sent to deliver the remainder of the
+ * information.
+ * error - The requested stats could not be delivered, for example due
+ * to a shortage of memory to construct a message holding the
+ * requested stats.
+ * invalid - The requested stat type is either not recognized, or the
+ * target is configured to not gather the stats type in question.
+ * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ * series_done - This special value indicates that no further stats info
+ * elements are present within a series of stats info elems
+ * (within a stats upload confirmation message).
+ */
+enum htt_dbg_stats_status {
+ HTT_DBG_STATS_STATUS_PRESENT = 0,
+ HTT_DBG_STATS_STATUS_PARTIAL = 1,
+ HTT_DBG_STATS_STATUS_ERROR = 2,
+ HTT_DBG_STATS_STATUS_INVALID = 3,
+ HTT_DBG_STATS_STATUS_SERIES_DONE = 7
+};
+
+/*
+ * target -> host statistics upload
+ *
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a series of tag-length-value stats information elements.
+ * The tag-length header for each stats info element also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A special value of all 1's in this status field is used to indicate
+ * the end of the series of stats info elements.
+ *
+ *
+ * |31 16|15 8|7 5|4 0|
+ * |------------------------------------------------------------|
+ * | reserved | msg type |
+ * |------------------------------------------------------------|
+ * | cookie LSBs |
+ * |------------------------------------------------------------|
+ * | cookie MSBs |
+ * |------------------------------------------------------------|
+ * | stats entry length | reserved | S |stat type|
+ * |------------------------------------------------------------|
+ * | |
+ * | type-specific stats info |
+ * | |
+ * |------------------------------------------------------------|
+ * | stats entry length | reserved | S |stat type|
+ * |------------------------------------------------------------|
+ * | |
+ * | type-specific stats info |
+ * | |
+ * |------------------------------------------------------------|
+ * | n/a | reserved | 111 | n/a |
+ * |------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a statistics upload confirmation message
+ * Value: 0x9
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 4:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: htt_dbg_stats_type
+ * - STATUS
+ * Bits 7:5
+ * Purpose: indicate whether the requested stats are present
+ * Value: htt_dbg_stats_status, including a special value (0x7) to mark
+ * the completion of the stats entry series
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes. Even if the length is not an integer multiple of 4, the
+ * subsequent stats entry header will begin on a 4-byte aligned
+ * boundary.
+ */
+
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5
+
+struct htt_stats_conf_item {
+ union {
+ u8 info;
+ struct {
+ u8 stat_type:5; /* %HTT_DBG_STATS_ */
+ u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
+ } __packed;
+ } __packed;
+ u8 pad;
+ __le16 length;
+ u8 payload[0]; /* roundup(length, 4) long */
+} __packed;
+
+struct htt_stats_conf {
+ u8 pad[3];
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+
+ /* each item has variable length! */
+ struct htt_stats_conf_item items[0];
+} __packed;
+
+static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
+ const struct htt_stats_conf_item *item)
+{
+ return (void *)item + sizeof(*item) +
+ roundup(__le16_to_cpu(item->length), 4);
+}
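+
+/*
+ * Usage sketch for walking a stats upload confirmation (process_item()
+ * is illustrative only): iterate the variable-length items until the
+ * series-done status is seen, e.g.:
+ *
+ * item = conf->items;
+ * while (item->status != HTT_DBG_STATS_STATUS_SERIES_DONE) {
+ *         process_item(item);
+ *         item = htt_stats_conf_next_item(item);
+ * }
+ */
+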
+/*
+ * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
+ *
+ * The following field definitions describe the format of the HTT host
+ * to target frag_desc/msdu_ext bank configuration message.
+ * The message contains the base address and the min and max id of the
+ * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
+ * MSDU_EXT/FRAG_DESC.
+ * HTT will use the id in the HTT descriptor instead of sending the frag_desc_ptr.
+ * For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0
+ * the hardware does the mapping/translation.
+ *
+ * The total number of banks that can be configured is 16.
+ *
+ * This should be called before any TX has been initiated by the HTT.
+ *
+ * |31 16|15 8|7 5|4 0|
+ * |------------------------------------------------------------|
+ * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type |
+ * |------------------------------------------------------------|
+ * | BANK0_BASE_ADDRESS |
+ * |------------------------------------------------------------|
+ * | ... |
+ * |------------------------------------------------------------|
+ * | BANK15_BASE_ADDRESS |
+ * |------------------------------------------------------------|
+ * | BANK0_MAX_ID | BANK0_MIN_ID |
+ * |------------------------------------------------------------|
+ * | ... |
+ * |------------------------------------------------------------|
+ * | BANK15_MAX_ID | BANK15_MIN_ID |
+ * |------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Value: 0x6
+ * - BANKx_BASE_ADDRESS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
+ * bank physical/bus address.
+ * - BANKx_MIN_ID
+ * Bits 15:0
+ * Purpose: Provide a mechanism to specify the min index that needs to
+ * be mapped.
+ * - BANKx_MAX_ID
+ * Bits 31:16
+ * Purpose: Provide a mechanism to specify the max index that needs to
+ * be mapped.
+ */
+struct htt_frag_desc_bank_id {
+ __le16 bank_min_id;
+ __le16 bank_max_id;
+} __packed;
+
+/* real is 16 but it wouldn't fit in the max htt message size
+ * so we use a conservatively safe value for now */
+#define HTT_FRAG_DESC_BANK_MAX 4
+
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2)
+
+struct htt_frag_desc_bank_cfg {
+ u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+ u8 num_banks;
+ u8 desc_size;
+ __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+} __packed;
+
+union htt_rx_pn_t {
+ /* WEP: 24-bit PN */
+ u32 pn24;
+
+ /* TKIP or CCMP: 48-bit PN */
+ u64 pn48;
+
+ /* WAPI: 128-bit PN */
+ u64 pn128[2];
+};
+
+struct htt_cmd {
+ struct htt_cmd_hdr hdr;
+ union {
+ struct htt_ver_req ver_req;
+ struct htt_mgmt_tx_desc mgmt_tx;
+ struct htt_data_tx_desc data_tx;
+ struct htt_rx_ring_setup rx_setup;
+ struct htt_stats_req stats_req;
+ struct htt_oob_sync_req oob_sync_req;
+ struct htt_aggr_conf aggr_conf;
+ struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+ };
+} __packed;
+
+struct htt_resp {
+ struct htt_resp_hdr hdr;
+ union {
+ struct htt_ver_resp ver_resp;
+ struct htt_mgmt_tx_completion mgmt_tx_completion;
+ struct htt_data_tx_completion data_tx_completion;
+ struct htt_rx_indication rx_ind;
+ struct htt_rx_fragment_indication rx_frag_ind;
+ struct htt_rx_peer_map peer_map;
+ struct htt_rx_peer_unmap peer_unmap;
+ struct htt_rx_flush rx_flush;
+ struct htt_rx_addba rx_addba;
+ struct htt_rx_delba rx_delba;
+ struct htt_security_indication security_indication;
+ struct htt_rc_update rc_update;
+ struct htt_rx_test rx_test;
+ struct htt_pktlog_msg pktlog_msg;
+ struct htt_stats_conf stats_conf;
+ };
+} __packed;
+
+
+/*** host side structures follow ***/
+
+struct htt_tx_done {
+ u32 msdu_id;
+ bool discard;
+ bool no_ack;
+};
+
+struct htt_peer_map_event {
+ u8 vdev_id;
+ u16 peer_id;
+ u8 addr[ETH_ALEN];
+};
+
+struct htt_peer_unmap_event {
+ u16 peer_id;
+};
+
+struct htt_rx_info {
+ struct sk_buff *skb;
+ enum htt_rx_mpdu_status status;
+ enum htt_rx_mpdu_encrypt_type encrypt_type;
+ s8 signal;
+ struct {
+ u8 info0;
+ u32 info1;
+ u32 info2;
+ } rate;
+ bool fcs_err;
+};
+
+struct ath10k_htt {
+ struct ath10k *ar;
+ enum ath10k_htc_ep_id eid;
+
+ int max_throughput_mbps;
+ u8 target_version_major;
+ u8 target_version_minor;
+ struct completion target_version_received;
+
+ struct {
+ /*
+ * Ring of network buffer objects - This ring is
+ * used exclusively by the host SW. This ring
+ * mirrors the dev_addrs_ring that is shared
+ * between the host SW and the MAC HW. The host SW
+ * uses this netbufs ring to locate the network
+ * buffer objects whose data buffers the HW has
+ * filled.
+ */
+ struct sk_buff **netbufs_ring;
+ /*
+ * Ring of buffer addresses -
+ * This ring holds the "physical" device address of the
+ * rx buffers the host SW provides for the MAC HW to
+ * fill.
+ */
+ __le32 *paddrs_ring;
+
+ /*
+ * Base address of ring, as a "physical" device address
+ * rather than a CPU address.
+ */
+ dma_addr_t base_paddr;
+
+ /* how many elems in the ring (power of 2) */
+ int size;
+
+ /* size - 1 */
+ unsigned size_mask;
+
+ /* how many rx buffers to keep in the ring */
+ int fill_level;
+
+ /* how many rx buffers (full+empty) are in the ring */
+ int fill_cnt;
+
+ /*
+ * alloc_idx - where HTT SW has deposited empty buffers
+ * This is allocated in consistent mem, so that the FW can
+ * read this variable, and program the HW's FW_IDX reg with
+ * the value of this shadow register.
+ */
+ struct {
+ __le32 *vaddr;
+ dma_addr_t paddr;
+ } alloc_idx;
+
+ /* where HTT SW has processed bufs filled by rx MAC DMA */
+ struct {
+ unsigned msdu_payld;
+ } sw_rd_idx;
+
+ /*
+ * refill_retry_timer - timer triggered when the ring is
+ * not refilled to the level expected
+ */
+ struct timer_list refill_retry_timer;
+
+ /* Protects access to all rx ring buffer state variables */
+ spinlock_t lock;
+ } rx_ring;
+
+ unsigned int prefetch_len;
+
+ /* Protects access to %pending_tx, %used_msdu_ids */
+ spinlock_t tx_lock;
+ int max_num_pending_tx;
+ int num_pending_tx;
+ struct sk_buff **pending_tx;
+ unsigned long *used_msdu_ids; /* bitmap */
+ wait_queue_head_t empty_tx_wq;
+
+ /* set if host-fw communication goes haywire
+ * used to avoid further failures */
+ bool rx_confused;
+};
+
+#define RX_HTT_HDR_STATUS_LEN 64
+
+/* This structure layout is programmed via rx ring setup
+ * so that FW knows how to transfer the rx descriptor to the host.
+ * Buffers like this are placed on the rx ring. */
+struct htt_rx_desc {
+ union {
+ /* This field is filled on the host using the msdu buffer
+ * from htt_rx_indication */
+ struct fw_rx_desc_base fw_desc;
+ u32 pad;
+ } __packed;
+ struct {
+ struct rx_attention attention;
+ struct rx_frag_info frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start msdu_start;
+ struct rx_msdu_end msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end ppdu_end;
+ } __packed;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+ u8 msdu_payload[0];
+};
+
+#define HTT_RX_DESC_ALIGN 8
+
+#define HTT_MAC_ADDR_LEN 6
+
+/*
+ * FIX THIS
+ * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
+ * rounded up to a cache line size.
+ */
+#define HTT_RX_BUF_SIZE 1920
+#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
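+
+/*
+ * Editorial sketch (illustrative only): such a mask is typically used
+ * to round a buffer length up to a whole number of worst-case cache
+ * lines before DMA-mapping it, e.g.:
+ *
+ * len = (len + HTT_MAX_CACHE_LINE_SIZE_MASK) &
+ *       ~HTT_MAX_CACHE_LINE_SIZE_MASK;
+ */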
+
+struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
+int ath10k_htt_attach_target(struct ath10k_htt *htt);
+void ath10k_htt_detach(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_attach(struct ath10k_htt *htt);
+void ath10k_htt_tx_detach(struct ath10k_htt *htt);
+int ath10k_htt_rx_attach(struct ath10k_htt *htt);
+void ath10k_htt_rx_detach(struct ath10k_htt *htt);
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
new file mode 100644
index 000000000000..de058d7adca8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -0,0 +1,1167 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+#include "htt.h"
+#include "txrx.h"
+#include "debug.h"
+
+#include <linux/log2.h>
+
+/* slightly larger than one large A-MPDU */
+#define HTT_RX_RING_SIZE_MIN 128
+
+/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
+#define HTT_RX_RING_SIZE_MAX 2048
+
+#define HTT_RX_AVG_FRM_BYTES 1000
+
+/* ms, very conservative */
+#define HTT_RX_HOST_LATENCY_MAX_MS 20
+
+/* ms, conservative */
+#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
+
+/* when under memory pressure rx ring refill may fail and needs a retry */
+#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
+{
+ int size;
+
+ /*
+ * It is expected that the host CPU will typically be able to
+ * service the rx indication from one A-MPDU before the rx
+ * indication from the subsequent A-MPDU happens, roughly 1-2 ms
+ * later. However, the rx ring should be sized very conservatively,
+ * to accommodate the worst reasonable delay before the host CPU
+ * services a rx indication interrupt.
+ *
+ * The rx ring need not be kept full of empty buffers. In theory,
+ * the htt host SW can dynamically track the low-water mark in the
+ * rx ring, and dynamically adjust the level to which the rx ring
+ * is filled with empty buffers, to dynamically meet the desired
+ * low-water mark.
+ *
+ * In contrast, it's difficult to resize the rx ring itself, once
+ * it's in use. Thus, the ring itself should be sized very
+ * conservatively, while the degree to which the ring is filled
+ * with empty buffers should be sized moderately conservatively.
+ */
+
+ /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+ size =
+ htt->max_throughput_mbps *
+ 1000 /
+ (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
+
+ if (size < HTT_RX_RING_SIZE_MIN)
+ size = HTT_RX_RING_SIZE_MIN;
+
+ if (size > HTT_RX_RING_SIZE_MAX)
+ size = HTT_RX_RING_SIZE_MAX;
+
+ size = roundup_pow_of_two(size);
+
+ return size;
+}
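+
+/*
+ * Worked example of the sizing above: with the default
+ * max_throughput_mbps of 800 set in ath10k_htt_attach(),
+ * 800 * 1000 / (8 * 1000) * 20 = 2000 buffers, which then rounds up to
+ * the next power of two, 2048 (i.e. HTT_RX_RING_SIZE_MAX).
+ */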
+
+static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
+{
+ int size;
+
+ /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+ size =
+ htt->max_throughput_mbps *
+ 1000 /
+ (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
+
+ /*
+ * Make sure the fill level is at least 1 less than the ring size.
+ * Leaving 1 element empty allows the SW to easily distinguish
+ * between a full ring vs. an empty ring.
+ */
+ if (size >= htt->rx_ring.size)
+ size = htt->rx_ring.size - 1;
+
+ return size;
+}
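+
+/*
+ * With the same default throughput figure the initial fill level works
+ * out to 800 * 1000 / (8 * 1000) * 10 = 1000 buffers, comfortably below
+ * the 2048-entry ring computed above.
+ */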
+
+static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_cb *cb;
+ int i;
+
+ for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
+ skb = htt->rx_ring.netbufs_ring[i];
+ cb = ATH10K_SKB_CB(skb);
+ dma_unmap_single(htt->ar->dev, cb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ htt->rx_ring.fill_cnt = 0;
+}
+
+static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+ struct htt_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0, idx;
+
+ idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
+ while (num > 0) {
+ skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
+ skb->data);
+
+ /* Clear rx_desc attention word before posting to Rx ring */
+ rx_desc = (struct htt_rx_desc *)skb->data;
+ rx_desc->attention.flags = __cpu_to_le32(0);
+
+ paddr = dma_map_single(htt->ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
+ dev_kfree_skb_any(skb);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ATH10K_SKB_CB(skb)->paddr = paddr;
+ htt->rx_ring.netbufs_ring[idx] = skb;
+ htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+ htt->rx_ring.fill_cnt++;
+
+ num--;
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ }
+
+fail:
+ *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
+ return ret;
+}
+
+static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+ lockdep_assert_held(&htt->rx_ring.lock);
+ return __ath10k_htt_rx_ring_fill_n(htt, num);
+}
+
+static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
+{
+ int ret, num_to_fill;
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
+ if (ret == -ENOMEM) {
+ /*
+ * Failed to fill it to the desired level -
+ * we'll start a timer and try again next time.
+ * As long as enough buffers are left in the ring for
+ * another A-MPDU rx, no special recovery is needed.
+ */
+ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+ msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+ }
+ spin_unlock_bh(&htt->rx_ring.lock);
+}
+
+static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)arg;
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
+{
+ return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
+ htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
+}
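+
+/*
+ * Note: the masked subtraction above relies on the ring size being a
+ * power of two, so it yields the number of entries between the software
+ * read index and the allocation index even after the indices wrap.
+ */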
+
+void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+{
+ int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+
+ del_timer_sync(&htt->rx_ring.refill_retry_timer);
+
+ while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
+ struct sk_buff *skb =
+ htt->rx_ring.netbufs_ring[sw_rd_idx];
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+ dma_unmap_single(htt->ar->dev, cb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
+ sw_rd_idx++;
+ sw_rd_idx &= htt->rx_ring.size_mask;
+ }
+
+ dma_free_coherent(htt->ar->dev,
+ (htt->rx_ring.size *
+ sizeof(htt->rx_ring.paddrs_ring)),
+ htt->rx_ring.paddrs_ring,
+ htt->rx_ring.base_paddr);
+
+ dma_free_coherent(htt->ar->dev,
+ sizeof(*htt->rx_ring.alloc_idx.vaddr),
+ htt->rx_ring.alloc_idx.vaddr,
+ htt->rx_ring.alloc_idx.paddr);
+
+ kfree(htt->rx_ring.netbufs_ring);
+}
+
+static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
+{
+ int idx;
+ struct sk_buff *msdu;
+
+ spin_lock_bh(&htt->rx_ring.lock);
+
+ if (ath10k_htt_rx_ring_elems(htt) == 0)
+ ath10k_warn("htt rx ring is empty!\n");
+
+ idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+ msdu = htt->rx_ring.netbufs_ring[idx];
+
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ htt->rx_ring.sw_rd_idx.msdu_payld = idx;
+ htt->rx_ring.fill_cnt--;
+
+ spin_unlock_bh(&htt->rx_ring.lock);
+ return msdu;
+}
+
+static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
+{
+ struct sk_buff *next;
+
+ while (skb) {
+ next = skb->next;
+ dev_kfree_skb_any(skb);
+ skb = next;
+ }
+}
+
+static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
+ u8 **fw_desc, int *fw_desc_len,
+ struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu)
+{
+ int msdu_len, msdu_chaining = 0;
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rx_desc;
+
+ if (ath10k_htt_rx_ring_elems(htt) == 0)
+ ath10k_warn("htt rx ring is empty!\n");
+
+ if (htt->rx_confused) {
+ ath10k_warn("htt is confused. refusing rx\n");
+ return 0;
+ }
+
+ msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
+ while (msdu) {
+ int last_msdu, msdu_len_invalid, msdu_chained;
+
+ dma_unmap_single(htt->ar->dev,
+ ATH10K_SKB_CB(msdu)->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
+
+ rx_desc = (struct htt_rx_desc *)msdu->data;
+
+ /* FIXME: we must report msdu payload since this is what caller
+ * expects now */
+ skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+ skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+
+ /*
+ * Sanity check - confirm the HW is finished filling in the
+ * rx data.
+ * If the HW and SW are working correctly, then it's guaranteed
+ * that the HW's MAC DMA is done before this point in the SW.
+ * To prevent the case that we handle a stale Rx descriptor,
+ * just assert for now until we have a way to recover.
+ */
+ if (!(__le32_to_cpu(rx_desc->attention.flags)
+ & RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ ath10k_htt_rx_free_msdu_chain(*head_msdu);
+ *head_msdu = NULL;
+ msdu = NULL;
+ ath10k_err("htt rx stopped. cannot recover\n");
+ htt->rx_confused = true;
+ break;
+ }
+
+ /*
+ * Copy the FW rx descriptor for this MSDU from the rx
+ * indication message into the MSDU's netbuf. HL uses the
+ * same rx indication message definition as LL, and simply
+ * appends new info (fields from the HW rx desc, and the
+ * MSDU payload itself). So, the offset into the rx
+ * indication message only has to account for the standard
+ * offset of the per-MSDU FW rx desc info within the
+ * message, and how many bytes of the per-MSDU FW rx desc
+ * info have already been consumed. (And the endianness of
+ * the host, since for a big-endian host, the rx ind
+ * message contents, including the per-MSDU rx desc bytes,
+ * were byteswapped during upload.)
+ */
+ if (*fw_desc_len > 0) {
+ rx_desc->fw_desc.info0 = **fw_desc;
+ /*
+ * The target is expected to only provide the basic
+ * per-MSDU rx descriptors. Just to be sure, verify
+ * that the target has not attached extension data
+ * (e.g. LRO flow ID).
+ */
+
+ /* or more, if there's extension data */
+ (*fw_desc)++;
+ (*fw_desc_len)--;
+ } else {
+ /*
+ * When an oversized A-MSDU happens, the FW will lose
+ * some of the MSDU status - in this case, the FW
+ * descriptors provided will be fewer than the
+ * actual MSDUs inside this MPDU. Mark the FW
+ * descriptors so that the frames are still delivered
+ * to the upper stack, provided there is no CRC error
+ * for this MPDU.
+ *
+ * FIX THIS - the FW descriptors are actually for
+ * MSDUs in the end of this A-MSDU instead of the
+ * beginning.
+ */
+ rx_desc->fw_desc.info0 = 0;
+ }
+
+ msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+ & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
+ RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
+ msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+ RX_MSDU_START_INFO0_MSDU_LENGTH);
+ msdu_chained = rx_desc->frag_info.ring2_more_count;
+
+ if (msdu_len_invalid)
+ msdu_len = 0;
+
+ skb_trim(msdu, 0);
+ skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+ msdu_len -= msdu->len;
+
+ /* FIXME: Do chained buffers include htt_rx_desc or not? */
+ while (msdu_chained--) {
+ struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+
+ dma_unmap_single(htt->ar->dev,
+ ATH10K_SKB_CB(next)->paddr,
+ next->len + skb_tailroom(next),
+ DMA_FROM_DEVICE);
+
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+ next->data,
+ next->len + skb_tailroom(next));
+
+ skb_trim(next, 0);
+ skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
+ msdu_len -= next->len;
+
+ msdu->next = next;
+ msdu = next;
+ msdu_chaining = 1;
+ }
+
+ if (msdu_len > 0) {
+ /* This may indicate a FW bug */
+ ath10k_warn("htt rx msdu len not consumed (%d)\n",
+ msdu_len);
+ }
+
+ last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+ RX_MSDU_END_INFO0_LAST_MSDU;
+
+ if (last_msdu) {
+ msdu->next = NULL;
+ break;
+ } else {
+ struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+ msdu->next = next;
+ msdu = next;
+ }
+ }
+ *tail_msdu = msdu;
+
+ /*
+ * Don't refill the ring yet.
+ *
+ * First, the elements popped here are still in use - it is not
+ * safe to overwrite them until the matching call to
+ * mpdu_desc_list_next. Second, for efficiency it is preferable to
+ * refill the rx ring with 1 PPDU's worth of rx buffers (something
+ * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
+ * (something like 3 buffers). Consequently, we'll rely on the txrx
+ * SW to tell us when it is done pulling all the PPDU's rx buffers
+ * out of the rx ring, and then refill it just once.
+ */
+
+ return msdu_chaining;
+}
+
+int ath10k_htt_rx_attach(struct ath10k_htt *htt)
+{
+ dma_addr_t paddr;
+ void *vaddr;
+ struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
+
+ htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
+ if (!is_power_of_2(htt->rx_ring.size)) {
+ ath10k_warn("htt rx ring size is not power of 2\n");
+ return -EINVAL;
+ }
+
+ htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+
+ /*
+ * Set the initial value for the level to which the rx ring
+ * should be filled, based on the max throughput and the
+ * worst likely latency for the host to fill the rx ring
+ * with new buffers. In theory, this fill level can be
+ * dynamically adjusted from the initial value set here, to
+ * reflect the actual host latency rather than a
+ * conservative assumption about the host latency.
+ */
+ htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
+
+ htt->rx_ring.netbufs_ring =
+ kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!htt->rx_ring.netbufs_ring)
+ goto err_netbuf;
+
+ vaddr = dma_alloc_coherent(htt->ar->dev,
+ (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
+ &paddr, GFP_DMA);
+ if (!vaddr)
+ goto err_dma_ring;
+
+ htt->rx_ring.paddrs_ring = vaddr;
+ htt->rx_ring.base_paddr = paddr;
+
+ vaddr = dma_alloc_coherent(htt->ar->dev,
+ sizeof(*htt->rx_ring.alloc_idx.vaddr),
+ &paddr, GFP_DMA);
+ if (!vaddr)
+ goto err_dma_idx;
+
+ htt->rx_ring.alloc_idx.vaddr = vaddr;
+ htt->rx_ring.alloc_idx.paddr = paddr;
+ htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+ *htt->rx_ring.alloc_idx.vaddr = 0;
+
+ /* Initialize the Rx refill retry timer */
+ setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
+
+ spin_lock_init(&htt->rx_ring.lock);
+
+ htt->rx_ring.fill_cnt = 0;
+ if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
+ goto err_fill_ring;
+
+ ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+ htt->rx_ring.size, htt->rx_ring.fill_level);
+ return 0;
+
+err_fill_ring:
+ ath10k_htt_rx_ring_free(htt);
+ dma_free_coherent(htt->ar->dev,
+ sizeof(*htt->rx_ring.alloc_idx.vaddr),
+ htt->rx_ring.alloc_idx.vaddr,
+ htt->rx_ring.alloc_idx.paddr);
+err_dma_idx:
+ dma_free_coherent(htt->ar->dev,
+ (htt->rx_ring.size *
+ sizeof(htt->rx_ring.paddrs_ring)),
+ htt->rx_ring.paddrs_ring,
+ htt->rx_ring.base_paddr);
+err_dma_ring:
+ kfree(htt->rx_ring.netbufs_ring);
+err_netbuf:
+ return -ENOMEM;
+}
+
+static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return 4;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ return 8;
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ return 0;
+ }
+
+ ath10k_warn("unknown encryption type %d\n", type);
+ return 0;
+}
+
+static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return 4;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ return 8;
+ }
+
+ ath10k_warn("unknown encryption type %d\n", type);
+ return 0;
+}
+
+/* Applies for first msdu in chain, before altering it. */
+static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format fmt;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ if (fmt == RX_MSDU_DECAP_RAW)
+ return (void *)skb->data;
+ else
+ return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+}
+
+/* This function only applies for first msdu in an msdu chain */
+static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ if (qc[0] & 0x80)
+ return true;
+ }
+ return false;
+}
+
+static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+ struct htt_rx_info *info)
+{
+ struct htt_rx_desc *rxd;
+ struct sk_buff *amsdu;
+ struct sk_buff *first;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb = info->skb;
+ enum rx_msdu_decap_format fmt;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ unsigned int hdr_len;
+ int crypto_len;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ /* FIXME: No idea what assumptions are safe here. Need logs */
+ if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
+ (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
+ ath10k_htt_rx_free_msdu_chain(skb->next);
+ skb->next = NULL;
+ return -ENOTSUPP;
+ }
+
+ /* A-MSDU max is a little less than 8K */
+ amsdu = dev_alloc_skb(8*1024);
+ if (!amsdu) {
+ ath10k_warn("A-MSDU allocation failed\n");
+ ath10k_htt_rx_free_msdu_chain(skb->next);
+ skb->next = NULL;
+ return -ENOMEM;
+ }
+
+ if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
+ int hdrlen;
+
+ hdr = (void *)rxd->rx_hdr_status;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
+ }
+
+ first = skb;
+ while (skb) {
+ void *decap_hdr;
+ int decap_len = 0;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+ decap_hdr = (void *)rxd->rx_hdr_status;
+
+ if (skb == first) {
+			/* We receive linked A-MSDU subframe skbuffs. The
+			 * first one contains the original 802.11 header (and
+			 * possibly a crypto param) in the RX descriptor. The
+			 * A-MSDU subframe header follows that. Each part is
+			 * aligned to a 4-byte boundary. */
+
+ hdr = (void *)amsdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
+
+ decap_hdr += roundup(hdr_len, 4);
+ decap_hdr += roundup(crypto_len, 4);
+ }
+
+ if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
+ /* Ethernet2 decap inserts ethernet header in place of
+ * A-MSDU subframe header. */
+ skb_pull(skb, 6 + 6 + 2);
+
+ /* A-MSDU subframe header length */
+ decap_len += 6 + 6 + 2;
+
+ /* Ethernet2 decap also strips the LLC/SNAP so we need
+ * to re-insert it. The LLC/SNAP follows A-MSDU
+ * subframe header. */
+ /* FIXME: Not all LLCs are 8 bytes long */
+ decap_len += 8;
+
+ memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+ }
+
+ if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
+ /* Native Wifi decap inserts regular 802.11 header
+ * in place of A-MSDU subframe header. */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+
+ /* A-MSDU subframe header length */
+ decap_len += 6 + 6 + 2;
+
+ memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+ }
+
+ if (fmt == RX_MSDU_DECAP_RAW)
+ skb_trim(skb, skb->len - 4); /* remove FCS */
+
+ memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+
+		/* A-MSDU subframes are padded to 4 bytes, but
+		 * relative to the first subframe, not the whole MPDU */
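+		/* e.g. a 14-byte subframe header + 51-byte payload = 65 bytes,
+		 * so 3 bytes of zero padding are appended */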
+ if (skb->next && ((decap_len + skb->len) & 3)) {
+ int padlen = 4 - ((decap_len + skb->len) & 3);
+ memset(skb_put(amsdu, padlen), 0, padlen);
+ }
+
+ skb = skb->next;
+ }
+
+ info->skb = amsdu;
+ info->encrypt_type = enctype;
+
+ ath10k_htt_rx_free_msdu_chain(first);
+
+ return 0;
+}
+
+static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+{
+ struct sk_buff *skb = info->skb;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ enum rx_msdu_decap_format fmt;
+ enum htt_rx_mpdu_encrypt_type enctype;
+
+	/* This shouldn't happen. If it does then it may be a FW bug. */
+ if (skb->next) {
+ ath10k_warn("received chained non A-MSDU frame\n");
+ ath10k_htt_rx_free_msdu_chain(skb->next);
+ skb->next = NULL;
+ }
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+
+ switch (fmt) {
+ case RX_MSDU_DECAP_RAW:
+ /* remove trailing FCS */
+ skb_trim(skb, skb->len - 4);
+ break;
+ case RX_MSDU_DECAP_NATIVE_WIFI:
+ /* nothing to do here */
+ break;
+ case RX_MSDU_DECAP_ETHERNET2_DIX:
+ /* macaddr[6] + macaddr[6] + ethertype[2] */
+ skb_pull(skb, 6 + 6 + 2);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ /* macaddr[6] + macaddr[6] + len[2] */
+ /* we don't need this for non-A-MSDU */
+ skb_pull(skb, 6 + 6 + 2);
+ break;
+ }
+
+ if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
+ void *llc;
+ int llclen;
+
+ llclen = 8;
+ llc = hdr;
+ llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
+ llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
+
+ skb_push(skb, llclen);
+ memcpy(skb->data, llc, llclen);
+ }
+
+ if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
+ int len = ieee80211_hdrlen(hdr->frame_control);
+ skb_push(skb, len);
+ memcpy(skb->data, hdr, len);
+ }
+
+ info->skb = skb;
+ info->encrypt_type = enctype;
+ return 0;
+}
+
+static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+
+ if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
+ return true;
+
+ return false;
+}
+
+static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+
+ if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
+ return true;
+
+ return false;
+}
+
+static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
+ struct htt_rx_indication *rx)
+{
+ struct htt_rx_info info;
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ struct ieee80211_hdr *hdr;
+ int num_mpdu_ranges;
+ int fw_desc_len;
+ u8 *fw_desc;
+ int i, j;
+ int ret;
+
+ memset(&info, 0, sizeof(info));
+
+ fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
+ fw_desc = (u8 *)&rx->fw_desc;
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+ mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+ rx, sizeof(*rx) +
+ (sizeof(struct htt_rx_indication_mpdu_range) *
+ num_mpdu_ranges));
+
+ for (i = 0; i < num_mpdu_ranges; i++) {
+ info.status = mpdu_ranges[i].mpdu_range_status;
+
+ for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
+ struct sk_buff *msdu_head, *msdu_tail;
+ enum htt_rx_mpdu_status status;
+ int msdu_chaining;
+
+ msdu_head = NULL;
+ msdu_tail = NULL;
+ msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
+ &fw_desc,
+ &fw_desc_len,
+ &msdu_head,
+ &msdu_tail);
+
+ if (!msdu_head) {
+ ath10k_warn("htt rx no data!\n");
+ continue;
+ }
+
+ if (msdu_head->len == 0) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx dropping due to zero-len\n");
+ ath10k_htt_rx_free_msdu_chain(msdu_head);
+ continue;
+ }
+
+ if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
+ ath10k_htt_rx_free_msdu_chain(msdu_head);
+ continue;
+ }
+
+ status = info.status;
+
+			/* Skip mgmt frames since we handle these via WMI */
+ if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
+ ath10k_htt_rx_free_msdu_chain(msdu_head);
+ continue;
+ }
+
+ if (status != HTT_RX_IND_MPDU_STATUS_OK &&
+ status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+ !htt->ar->monitor_enabled) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx ignoring frame w/ status %d\n",
+ status);
+ ath10k_htt_rx_free_msdu_chain(msdu_head);
+ continue;
+ }
+
+ /* FIXME: we do not support chaining yet.
+ * this needs investigation */
+ if (msdu_chaining) {
+ ath10k_warn("msdu_chaining is true\n");
+ ath10k_htt_rx_free_msdu_chain(msdu_head);
+ continue;
+ }
+
+ info.skb = msdu_head;
+ info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
+ info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
+ info.signal += rx->ppdu.combined_rssi;
+
+ info.rate.info0 = rx->ppdu.info0;
+ info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
+ info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
+
+ hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
+
+ if (ath10k_htt_rx_hdr_is_amsdu(hdr))
+ ret = ath10k_htt_rx_amsdu(htt, &info);
+ else
+ ret = ath10k_htt_rx_msdu(htt, &info);
+
+ if (ret && !info.fcs_err) {
+ ath10k_warn("error processing msdus %d\n", ret);
+ dev_kfree_skb_any(info.skb);
+ continue;
+ }
+
+ if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
+ ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
+
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
+ info.skb->data, info.skb->len);
+ ath10k_process_rx(htt->ar, &info);
+ }
+ }
+
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
+ struct htt_rx_fragment_indication *frag)
+{
+ struct sk_buff *msdu_head, *msdu_tail;
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format fmt;
+ struct htt_rx_info info = {};
+ struct ieee80211_hdr *hdr;
+ int msdu_chaining;
+ bool tkip_mic_err;
+ bool decrypt_err;
+ u8 *fw_desc;
+ int fw_desc_len, hdrlen, paramlen;
+ int trim;
+
+ fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
+ fw_desc = (u8 *)frag->fw_msdu_rx_desc;
+
+ msdu_head = NULL;
+ msdu_tail = NULL;
+ msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+ &msdu_head, &msdu_tail);
+
+ ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+
+ if (!msdu_head) {
+ ath10k_warn("htt rx frag no data\n");
+ return;
+ }
+
+ if (msdu_chaining || msdu_head != msdu_tail) {
+ ath10k_warn("aggregation with fragmentation?!\n");
+ ath10k_htt_rx_free_msdu_chain(msdu_head);
+ return;
+ }
+
+ /* FIXME: implement signal strength */
+
+ hdr = (struct ieee80211_hdr *)msdu_head->data;
+ rxd = (void *)msdu_head->data - sizeof(*rxd);
+ tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
+ RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+ decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
+ RX_ATTENTION_FLAGS_DECRYPT_ERR);
+ fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ if (fmt != RX_MSDU_DECAP_RAW) {
+		ath10k_warn("we don't support non-raw fragmented rx yet\n");
+ dev_kfree_skb_any(msdu_head);
+ goto end;
+ }
+
+ info.skb = msdu_head;
+ info.status = HTT_RX_IND_MPDU_STATUS_OK;
+ info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ if (tkip_mic_err) {
+ ath10k_warn("tkip mic error\n");
+ info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
+ }
+
+ if (decrypt_err) {
+ ath10k_warn("decryption err in fragmented rx\n");
+ dev_kfree_skb_any(info.skb);
+ goto end;
+ }
+
+ if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+
+ /* It is more efficient to move the header than the payload */
+ memmove((void *)info.skb->data + paramlen,
+ (void *)info.skb->data,
+ hdrlen);
+ skb_pull(info.skb, paramlen);
+ hdr = (struct ieee80211_hdr *)info.skb->data;
+ }
+
+ /* remove trailing FCS */
+ trim = 4;
+
+ /* remove crypto trailer */
+ trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+
+ /* last fragment of TKIP frags has MIC */
+ if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ trim += 8;
+
+ if (trim > info.skb->len) {
+ ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
+ dev_kfree_skb_any(info.skb);
+ goto end;
+ }
+
+ skb_trim(info.skb, info.skb->len - trim);
+
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
+ info.skb->data, info.skb->len);
+ ath10k_process_rx(htt->ar, &info);
+
+end:
+ if (fw_desc_len > 0) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "expecting more fragmented rx in one indication %d\n",
+ fw_desc_len);
+ }
+}
+
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = ar->htt;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+
+ /* confirm alignment */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn("unaligned htt message, expect trouble\n");
+
+ ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
+ resp->hdr.msg_type);
+ switch (resp->hdr.msg_type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF: {
+ htt->target_version_major = resp->ver_resp.major;
+ htt->target_version_minor = resp->ver_resp.minor;
+ complete(&htt->target_version_received);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_IND: {
+ ath10k_htt_rx_handler(htt, &resp->rx_ind);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_PEER_MAP: {
+ struct htt_peer_map_event ev = {
+ .vdev_id = resp->peer_map.vdev_id,
+ .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
+ };
+ memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
+ ath10k_peer_map_event(htt, &ev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
+ struct htt_peer_unmap_event ev = {
+ .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
+ };
+ ath10k_peer_unmap_event(htt, &ev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
+ struct htt_tx_done tx_done = {};
+ int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
+
+ tx_done.msdu_id =
+ __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+
+ switch (status) {
+ case HTT_MGMT_TX_STATUS_OK:
+ break;
+ case HTT_MGMT_TX_STATUS_RETRY:
+ tx_done.no_ack = true;
+ break;
+ case HTT_MGMT_TX_STATUS_DROP:
+ tx_done.discard = true;
+ break;
+ }
+
+ ath10k_txrx_tx_completed(htt, &tx_done);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
+ struct htt_tx_done tx_done = {};
+ int status = MS(resp->data_tx_completion.flags,
+ HTT_DATA_TX_STATUS);
+ __le16 msdu_id;
+ int i;
+
+ switch (status) {
+ case HTT_DATA_TX_STATUS_NO_ACK:
+ tx_done.no_ack = true;
+ break;
+ case HTT_DATA_TX_STATUS_OK:
+ break;
+ case HTT_DATA_TX_STATUS_DISCARD:
+ case HTT_DATA_TX_STATUS_POSTPONE:
+ case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+ tx_done.discard = true;
+ break;
+ default:
+ ath10k_warn("unhandled tx completion status %d\n",
+ status);
+ tx_done.discard = true;
+ break;
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+ resp->data_tx_completion.num_msdus);
+
+ for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+ msdu_id = resp->data_tx_completion.msdus[i];
+ tx_done.msdu_id = __le16_to_cpu(msdu_id);
+ ath10k_txrx_tx_completed(htt, &tx_done);
+ }
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_SEC_IND: {
+ struct ath10k *ar = htt->ar;
+ struct htt_security_indication *ev = &resp->security_indication;
+
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "sec ind peer_id %d unicast %d type %d\n",
+ __le16_to_cpu(ev->peer_id),
+ !!(ev->flags & HTT_SECURITY_IS_UNICAST),
+ MS(ev->flags, HTT_SECURITY_TYPE));
+ complete(&ar->install_key_done);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ skb->data, skb->len);
+ ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TEST:
+ /* FIX THIS */
+ break;
+ case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+ case HTT_T2H_MSG_TYPE_STATS_CONF:
+ case HTT_T2H_MSG_TYPE_RX_ADDBA:
+ case HTT_T2H_MSG_TYPE_RX_DELBA:
+ case HTT_T2H_MSG_TYPE_RX_FLUSH:
+ default:
+ ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
+ resp->hdr.msg_type);
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ skb->data, skb->len);
+ break;
+	}
+
+ /* Free the indication buffer */
+ dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
new file mode 100644
index 000000000000..ef79106db247
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "htt.h"
+#include "mac.h"
+#include "hif.h"
+#include "txrx.h"
+#include "debug.h"
+
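+/* Callers are expected to hold tx_lock. mac80211 queues are woken again once
+ * the pending tx count drops back below the maximum. */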
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+ htt->num_pending_tx--;
+ if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
+ ieee80211_wake_queues(htt->ar->hw);
+}
+
+static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+ spin_lock_bh(&htt->tx_lock);
+ __ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&htt->tx_lock);
+}
+
+static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+{
+ int ret = 0;
+
+ spin_lock_bh(&htt->tx_lock);
+
+ if (htt->num_pending_tx >= htt->max_num_pending_tx) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ htt->num_pending_tx++;
+ if (htt->num_pending_tx == htt->max_num_pending_tx)
+ ieee80211_stop_queues(htt->ar->hw);
+
+exit:
+ spin_unlock_bh(&htt->tx_lock);
+ return ret;
+}
+
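+/* Allocate a free MSDU id from the used_msdu_ids bitmap. Must be called with
+ * tx_lock held; returns -ENOBUFS when all ids are in use. */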
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+{
+ int msdu_id;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ msdu_id = find_first_zero_bit(htt->used_msdu_ids,
+ htt->max_num_pending_tx);
+ if (msdu_id == htt->max_num_pending_tx)
+ return -ENOBUFS;
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
+ __set_bit(msdu_id, htt->used_msdu_ids);
+ return msdu_id;
+}
+
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (!test_bit(msdu_id, htt->used_msdu_ids))
+ ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+ __clear_bit(msdu_id, htt->used_msdu_ids);
+}
+
+int ath10k_htt_tx_attach(struct ath10k_htt *htt)
+{
+ u8 pipe;
+
+ spin_lock_init(&htt->tx_lock);
+ init_waitqueue_head(&htt->empty_tx_wq);
+
+	/* At startup the free queue number should hint at the maximum
+	 * queue length */
+ pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
+ htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
+ pipe);
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+ htt->max_num_pending_tx);
+
+ htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
+ htt->max_num_pending_tx, GFP_KERNEL);
+ if (!htt->pending_tx)
+ return -ENOMEM;
+
+ htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
+ BITS_TO_LONGS(htt->max_num_pending_tx),
+ GFP_KERNEL);
+ if (!htt->used_msdu_ids) {
+ kfree(htt->pending_tx);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
+{
+ struct sk_buff *txdesc;
+ int msdu_id;
+
+ /* No locks needed. Called after communication with the device has
+ * been stopped. */
+
+ for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
+ if (!test_bit(msdu_id, htt->used_msdu_ids))
+ continue;
+
+ txdesc = htt->pending_tx[msdu_id];
+ if (!txdesc)
+ continue;
+
+ ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
+ msdu_id);
+
+ if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+ ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+
+ ATH10K_SKB_CB(txdesc)->htt.discard = true;
+ ath10k_txrx_tx_unref(htt, txdesc);
+ }
+}
+
+void ath10k_htt_tx_detach(struct ath10k_htt *htt)
+{
+ ath10k_htt_tx_cleanup_pending(htt);
+ kfree(htt->pending_tx);
+ kfree(htt->used_msdu_ids);
+}
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct ath10k_htt *htt = ar->htt;
+
+ if (skb_cb->htt.is_conf) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (skb_cb->is_aborted) {
+ skb_cb->htt.discard = true;
+
+		/* If the skbuff was aborted we must make sure the tx
+		 * resources are released; we can't simply run tx_unref()
+		 * twice because if the HTT tx completion arrived earlier
+		 * we would access already-freed memory */
+ if (skb_cb->htt.refcount > 1)
+ skb_cb->htt.refcount = 1;
+ }
+
+ ath10k_txrx_tx_unref(htt, skb);
+}
+
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
+{
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0;
+ int ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->ver_req);
+
+ skb = ath10k_htc_alloc_skb(len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
+
+ ATH10K_SKB_CB(skb)->htt.is_conf = true;
+
+ ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+{
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ u32 fw_idx;
+ int len;
+ int ret;
+
+ /*
+ * the HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup.hdr.num_rings = 1;
+
+ /* FIXME: do we need all of this? */
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_PPDU_START;
+ flags |= HTT_RX_RING_FLAGS_PPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MPDU_START;
+ flags |= HTT_RX_RING_FLAGS_MPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MSDU_START;
+ flags |= HTT_RX_RING_FLAGS_MSDU_END;
+ flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+ flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+ flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+ flags |= HTT_RX_RING_FLAGS_NULL_RX;
+ flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+
+ fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ ring->fw_idx_shadow_reg_paddr =
+ __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
+ ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
+ ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+ ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
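+/* ring offsets are expressed in units of 4-byte words within struct
+ * htt_rx_desc */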
+
+ ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+
+#undef desc_offset
+
+ ATH10K_SKB_CB(skb)->htt.is_conf = true;
+
+ ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+ struct device *dev = htt->ar->dev;
+ struct ath10k_skb_cb *skb_cb;
+ struct sk_buff *txdesc = NULL;
+ struct htt_cmd *cmd;
+ u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+ int len = 0;
+ int msdu_id = -1;
+ int res;
+
+ res = ath10k_htt_tx_inc_pending(htt);
+ if (res)
+ return res;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->mgmt_tx);
+
+ txdesc = ath10k_htc_alloc_skb(len);
+ if (!txdesc) {
+ res = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_bh(&htt->tx_lock);
+ msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (msdu_id < 0) {
+ spin_unlock_bh(&htt->tx_lock);
+ res = msdu_id;
+ goto err;
+ }
+ htt->pending_tx[msdu_id] = txdesc;
+ spin_unlock_bh(&htt->tx_lock);
+
+ res = ath10k_skb_map(dev, msdu);
+ if (res)
+ goto err;
+
+ skb_put(txdesc, len);
+ cmd = (struct htt_cmd *)txdesc->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
+ cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+ cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
+ cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
+ cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
+ memcpy(cmd->mgmt_tx.hdr, msdu->data,
+ min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
+
+	/* The refcount is decremented once by the HTC tx completion and once
+	 * by the HTT tx completion; the descriptor is freed when it reaches
+	 * zero */
+ skb_cb = ATH10K_SKB_CB(txdesc);
+ skb_cb->htt.msdu_id = msdu_id;
+ skb_cb->htt.refcount = 2;
+ skb_cb->htt.msdu = msdu;
+
+ res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+ if (res)
+ goto err;
+
+ return 0;
+
+err:
+ ath10k_skb_unmap(dev, msdu);
+
+ if (txdesc)
+ dev_kfree_skb_any(txdesc);
+ if (msdu_id >= 0) {
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+ }
+ ath10k_htt_tx_dec_pending(htt);
+ return res;
+}
+
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+ struct device *dev = htt->ar->dev;
+ struct htt_cmd *cmd;
+ struct htt_data_tx_desc_frag *tx_frags;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ath10k_skb_cb *skb_cb;
+ struct sk_buff *txdesc = NULL;
+ struct sk_buff *txfrag = NULL;
+ u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+ u8 tid;
+ int prefetch_len, desc_len, frag_len;
+ dma_addr_t frags_paddr;
+ int msdu_id = -1;
+ int res;
+ u8 flags0;
+ u16 flags1;
+
+ res = ath10k_htt_tx_inc_pending(htt);
+ if (res)
+ return res;
+
+ prefetch_len = min(htt->prefetch_len, msdu->len);
+ prefetch_len = roundup(prefetch_len, 4);
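+	/* prefetch_len bytes of the frame, rounded up to a 4-byte multiple,
+	 * are copied inline into the tx descriptor (cmd->data_tx.prefetch) */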
+
+ desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
+ frag_len = sizeof(*tx_frags) * 2;
+
+ txdesc = ath10k_htc_alloc_skb(desc_len);
+ if (!txdesc) {
+ res = -ENOMEM;
+ goto err;
+ }
+
+ txfrag = dev_alloc_skb(frag_len);
+ if (!txfrag) {
+ res = -ENOMEM;
+ goto err;
+ }
+
+ if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
+ ath10k_warn("htt alignment check failed. dropping packet.\n");
+ res = -EIO;
+ goto err;
+ }
+
+ spin_lock_bh(&htt->tx_lock);
+ msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (msdu_id < 0) {
+ spin_unlock_bh(&htt->tx_lock);
+ res = msdu_id;
+ goto err;
+ }
+ htt->pending_tx[msdu_id] = txdesc;
+ spin_unlock_bh(&htt->tx_lock);
+
+ res = ath10k_skb_map(dev, msdu);
+ if (res)
+ goto err;
+
+	/* the tx fragment list must be terminated with a zero entry */
+ skb_put(txfrag, frag_len);
+ tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
+ tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+ tx_frags[0].len = __cpu_to_le32(msdu->len);
+ tx_frags[1].paddr = __cpu_to_le32(0);
+ tx_frags[1].len = __cpu_to_le32(0);
+
+ res = ath10k_skb_map(dev, txfrag);
+ if (res)
+ goto err;
+
+ ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
+ (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+ (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
+ txfrag->data, frag_len);
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
+ msdu->data, msdu->len);
+
+ skb_put(txdesc, desc_len);
+ cmd = (struct htt_cmd *)txdesc->data;
+ memset(cmd, 0, desc_len);
+
+ tid = ATH10K_SKB_CB(msdu)->htt.tid;
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
+
+ flags0 = 0;
+ if (!ieee80211_has_protected(hdr->frame_control))
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+ flags1 = 0;
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+
+ frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ cmd->data_tx.flags0 = flags0;
+ cmd->data_tx.flags1 = __cpu_to_le16(flags1);
+ cmd->data_tx.len = __cpu_to_le16(msdu->len);
+ cmd->data_tx.id = __cpu_to_le16(msdu_id);
+ cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+
+ memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
+
+	/* The refcount is decremented once by the HTC tx completion and once
+	 * by the HTT tx completion; the descriptor is freed when it reaches
+	 * zero */
+ skb_cb = ATH10K_SKB_CB(txdesc);
+ skb_cb->htt.msdu_id = msdu_id;
+ skb_cb->htt.refcount = 2;
+ skb_cb->htt.txfrag = txfrag;
+ skb_cb->htt.msdu = msdu;
+
+ res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+ if (res)
+ goto err;
+
+ return 0;
+err:
+ if (txfrag)
+ ath10k_skb_unmap(dev, txfrag);
+ if (txdesc)
+ dev_kfree_skb_any(txdesc);
+ if (txfrag)
+ dev_kfree_skb_any(txfrag);
+ if (msdu_id >= 0) {
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+ }
+ ath10k_htt_tx_dec_pending(htt);
+ ath10k_skb_unmap(dev, msdu);
+ return res;
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
new file mode 100644
index 000000000000..44ed5af0a204
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HW_H_
+#define _HW_H_
+
+#include "targaddrs.h"
+
+/* Supported FW version */
+#define SUPPORTED_FW_MAJOR 1
+#define SUPPORTED_FW_MINOR 0
+#define SUPPORTED_FW_RELEASE 0
+#define SUPPORTED_FW_BUILD 629
+
+/* QCA988X 1.0 definitions */
+#define QCA988X_HW_1_0_VERSION 0x4000002c
+#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
+#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
+#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
+#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA988X 2.0 definitions */
+#define QCA988X_HW_2_0_VERSION 0x4100016c
+#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
+#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
+#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* Known peculiarities:
+ * - current FW doesn't support raw rx mode (last tested v599)
+ * - current FW dumps upon raw tx mode (last tested v599)
+ * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
+ * - raw frames have an FCS, nwifi frames don't
+ * - ethernet frames have the 802.11 header decapped and its parts (base hdr,
+ *   cipher param, llc/snap) are each aligned to a 4-byte boundary */
+enum ath10k_hw_txrx_mode {
+ ATH10K_HW_TXRX_RAW = 0,
+ ATH10K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH10K_HW_TXRX_ETHERNET = 2,
+};
+
+enum ath10k_mcast2ucast_mode {
+ ATH10K_MCAST2UCAST_DISABLED = 0,
+ ATH10K_MCAST2UCAST_ENABLED = 1,
+};
+
+#define TARGET_NUM_VDEVS 8
+#define TARGET_NUM_PEER_AST 2
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 0
+#define TARGET_MAC_AGGR_DELIM 0
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_PEERS 16
+#define TARGET_NUM_OFFLOAD_PEERS 0
+#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
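+/* i.e. 2 * (16 peers + 8 vdevs) = 48 TIDs */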
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 0
+#define TARGET_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_MAX_FRAG_ENTRIES 0
+
+
+/* Number of Copy Engines supported */
+#define CE_COUNT 8
+
+/*
+ * Total number of PCIe MSI interrupts requested for all interrupt sources.
+ * The PCIe standard forces this to be a power of 2.
+ * Some host OSes limit the number of MSI requests that can be granted to 8,
+ * so for now we abide by this limit and avoid requesting more than that.
+ */
+#define MSI_NUM_REQUEST_LOG2 3
+#define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2)
+
+/*
+ * Granted MSIs are assigned as follows:
+ * Firmware uses the first MSI.
+ * Remaining MSIs, if any, are used by the Copy Engines.
+ * This mapping is known to both Target firmware and Host software.
+ * It may be changed as long as Host and Target are kept in sync.
+ */
+/* MSI for firmware (errors, etc.) */
+#define MSI_ASSIGN_FW 0
+
+/* MSIs for Copy Engines */
+#define MSI_ASSIGN_CE_INITIAL 1
+#define MSI_ASSIGN_CE_MAX 7
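+/* e.g. with all 8 MSIs granted: MSI 0 serves firmware, MSIs 1-7 serve the
+ * Copy Engines */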
+
+/* as of IP3.7.1 */
+#define RTC_STATE_V_ON 3
+
+#define RTC_STATE_COLD_RESET_MASK 0x00000400
+#define RTC_STATE_V_LSB 0
+#define RTC_STATE_V_MASK 0x00000007
+#define RTC_STATE_ADDRESS 0x0000
+#define PCIE_SOC_WAKE_V_MASK 0x00000001
+#define PCIE_SOC_WAKE_ADDRESS 0x0004
+#define PCIE_SOC_WAKE_RESET 0x00000000
+#define SOC_GLOBAL_RESET_ADDRESS 0x0008
+
+#define RTC_SOC_BASE_ADDRESS 0x00004000
+#define RTC_WMAC_BASE_ADDRESS 0x00005000
+#define MAC_COEX_BASE_ADDRESS 0x00006000
+#define BT_COEX_BASE_ADDRESS 0x00007000
+#define SOC_PCIE_BASE_ADDRESS 0x00008000
+#define SOC_CORE_BASE_ADDRESS 0x00009000
+#define WLAN_UART_BASE_ADDRESS 0x0000c000
+#define WLAN_SI_BASE_ADDRESS 0x00010000
+#define WLAN_GPIO_BASE_ADDRESS 0x00014000
+#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
+#define WLAN_MAC_BASE_ADDRESS 0x00020000
+#define EFUSE_BASE_ADDRESS 0x00030000
+#define FPGA_REG_BASE_ADDRESS 0x00039000
+#define WLAN_UART2_BASE_ADDRESS 0x00054c00
+#define CE_WRAPPER_BASE_ADDRESS 0x00057000
+#define CE0_BASE_ADDRESS 0x00057400
+#define CE1_BASE_ADDRESS 0x00057800
+#define CE2_BASE_ADDRESS 0x00057c00
+#define CE3_BASE_ADDRESS 0x00058000
+#define CE4_BASE_ADDRESS 0x00058400
+#define CE5_BASE_ADDRESS 0x00058800
+#define CE6_BASE_ADDRESS 0x00058c00
+#define CE7_BASE_ADDRESS 0x00059000
+#define DBI_BASE_ADDRESS 0x00060000
+#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
+#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
+
+#define SOC_RESET_CONTROL_OFFSET 0x00000000
+#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
+#define SOC_CPU_CLOCK_OFFSET 0x00000020
+#define SOC_CPU_CLOCK_STANDARD_LSB 0
+#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
+#define SOC_CLOCK_CONTROL_OFFSET 0x00000028
+#define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
+#define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4
+#define SOC_LPO_CAL_OFFSET 0x000000e0
+#define SOC_LPO_CAL_ENABLE_LSB 20
+#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+
+#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
+#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
+#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
+#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
+
+#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
+#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
+#define WLAN_GPIO_PIN11_ADDRESS 0x00000054
+#define WLAN_GPIO_PIN12_ADDRESS 0x00000058
+#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c
+
+#define CLOCK_GPIO_OFFSET 0xffffffff
+#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
+
+#define SI_CONFIG_OFFSET 0x00000000
+#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
+#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
+#define SI_CONFIG_I2C_LSB 16
+#define SI_CONFIG_I2C_MASK 0x00010000
+#define SI_CONFIG_POS_SAMPLE_LSB 7
+#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
+#define SI_CONFIG_INACTIVE_DATA_LSB 5
+#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
+#define SI_CONFIG_INACTIVE_CLK_LSB 4
+#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
+#define SI_CONFIG_DIVIDER_LSB 0
+#define SI_CONFIG_DIVIDER_MASK 0x0000000f
+#define SI_CS_OFFSET 0x00000004
+#define SI_CS_DONE_ERR_MASK 0x00000400
+#define SI_CS_DONE_INT_MASK 0x00000200
+#define SI_CS_START_LSB 8
+#define SI_CS_START_MASK 0x00000100
+#define SI_CS_RX_CNT_LSB 4
+#define SI_CS_RX_CNT_MASK 0x000000f0
+#define SI_CS_TX_CNT_LSB 0
+#define SI_CS_TX_CNT_MASK 0x0000000f
+
+#define SI_TX_DATA0_OFFSET 0x00000008
+#define SI_TX_DATA1_OFFSET 0x0000000c
+#define SI_RX_DATA0_OFFSET 0x00000010
+#define SI_RX_DATA1_OFFSET 0x00000014
+
+#define CORE_CTRL_CPU_INTR_MASK 0x00002000
+#define CORE_CTRL_ADDRESS 0x0000
+#define PCIE_INTR_ENABLE_ADDRESS 0x0008
+#define PCIE_INTR_CLR_ADDRESS 0x0014
+#define SCRATCH_3_ADDRESS 0x0030
+
+/* Firmware indications to the Host via SCRATCH_3 register. */
+#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_IND_EVENT_PENDING 1
+#define FW_IND_INITIALIZED 2
+
+/* HOST_REG interrupt from firmware */
+#define PCIE_INTR_FIRMWARE_MASK 0x00000400
+#define PCIE_INTR_CE_MASK_ALL 0x0007f800
+
+#define DRAM_BASE_ADDRESS 0x00400000
+
+#define MISSING 0
+
+#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET
+#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET
+#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK
+#define RESET_CONTROL_MBOX_RST_MASK MISSING
+#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK
+#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
+#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
+#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
+#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
+#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
+#define LOCAL_SCRATCH_OFFSET 0x18
+#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET
+#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET
+#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS
+#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS
+#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS
+#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS
+#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB
+#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK
+#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB
+#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK
+#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
+#define MBOX_BASE_ADDRESS MISSING
+#define INT_STATUS_ENABLE_ERROR_LSB MISSING
+#define INT_STATUS_ENABLE_ERROR_MASK MISSING
+#define INT_STATUS_ENABLE_CPU_LSB MISSING
+#define INT_STATUS_ENABLE_CPU_MASK MISSING
+#define INT_STATUS_ENABLE_COUNTER_LSB MISSING
+#define INT_STATUS_ENABLE_COUNTER_MASK MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING
+#define INT_STATUS_ENABLE_ADDRESS MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING
+#define HOST_INT_STATUS_ADDRESS MISSING
+#define CPU_INT_STATUS_ADDRESS MISSING
+#define ERROR_INT_STATUS_ADDRESS MISSING
+#define ERROR_INT_STATUS_WAKEUP_MASK MISSING
+#define ERROR_INT_STATUS_WAKEUP_LSB MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING
+#define COUNT_DEC_ADDRESS MISSING
+#define HOST_INT_STATUS_CPU_MASK MISSING
+#define HOST_INT_STATUS_CPU_LSB MISSING
+#define HOST_INT_STATUS_ERROR_MASK MISSING
+#define HOST_INT_STATUS_ERROR_LSB MISSING
+#define HOST_INT_STATUS_COUNTER_MASK MISSING
+#define HOST_INT_STATUS_COUNTER_LSB MISSING
+#define RX_LOOKAHEAD_VALID_ADDRESS MISSING
+#define WINDOW_DATA_ADDRESS MISSING
+#define WINDOW_READ_ADDR_ADDRESS MISSING
+#define WINDOW_WRITE_ADDR_ADDRESS MISSING
+
+#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+
+#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
new file mode 100644
index 000000000000..da5c333d0d4b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -0,0 +1,3069 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "htt.h"
+#include "txrx.h"
+
+/**********/
+/* Crypto */
+/**********/
+
+static int ath10k_send_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr)
+{
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .macaddr = macaddr,
+ };
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ arg.key_flags = WMI_KEY_PAIRWISE;
+ else
+ arg.key_flags = WMI_KEY_GROUP;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ arg.key_cipher = WMI_CIPHER_WEP;
+		/* AP/IBSS mode requires the self-key to be groupwise;
+		 * otherwise a pairwise key must be set */
+ if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
+ arg.key_flags = WMI_KEY_PAIRWISE;
+ break;
+ default:
+ ath10k_warn("cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+ if (cmd == DISABLE_KEY) {
+ arg.key_cipher = WMI_CIPHER_NONE;
+ arg.key_data = NULL;
+ }
+
+ return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
+}
+
+static int ath10k_install_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ INIT_COMPLETION(ar->install_key_done);
+
+ ret = ath10k_send_key(arvif, key, cmd, macaddr);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
+ if (arvif->wep_keys[i] == NULL)
+ continue;
+
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+ addr);
+ if (ret)
+ return ret;
+
+ peer->keys[i] = arvif->wep_keys[i];
+ }
+
+ return 0;
+}
+
+static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] == NULL)
+ continue;
+
+ ret = ath10k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr);
+ if (ret && first_errno == 0)
+ first_errno = ret;
+
+ if (ret)
+ ath10k_warn("could not remove peer wep key %d (%d)\n",
+ i, ret);
+
+ peer->keys[i] = NULL;
+ }
+
+ return first_errno;
+}
+
+static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ u8 addr[ETH_ALEN];
+ int first_errno = 0;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (;;) {
+		/* since ath10k_install_key() can block we can't hold
+		 * data_lock all the time, so we try to remove the keys
+		 * incrementally */
+ spin_lock_bh(&ar->data_lock);
+ i = 0;
+ list_for_each_entry(peer, &ar->peers, list) {
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] == key) {
+ memcpy(addr, peer->addr, ETH_ALEN);
+ peer->keys[i] = NULL;
+ break;
+ }
+ }
+
+ if (i < ARRAY_SIZE(peer->keys))
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (i == ARRAY_SIZE(peer->keys))
+ break;
+
+ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr);
+ if (ret && first_errno == 0)
+ first_errno = ret;
+
+ if (ret)
+ ath10k_warn("could not remove key for %pM\n", addr);
+ }
+
+ return first_errno;
+}
+
+
+/*********************/
+/* General utilities */
+/*********************/
+
+static inline enum wmi_phy_mode
+chan_to_phymode(const struct cfg80211_chan_def *chandef)
+{
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ switch (chandef->chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ phymode = MODE_11G;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ phymode = MODE_11NG_HT20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ phymode = MODE_11NG_HT40;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ phymode = MODE_UNKNOWN;
+ break;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ phymode = MODE_11A;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ phymode = MODE_11NA_HT20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ phymode = MODE_11NA_HT40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ phymode = MODE_11AC_VHT80;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ phymode = MODE_UNKNOWN;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(phymode == MODE_UNKNOWN);
+ return phymode;
+}
+
+static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
+{
+/*
+ * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ 1 microsecond */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_peer *peer, *tmp;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath10k_warn("removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ar->data_lock);
+}
+
+/************************/
+/* Interface management */
+/************************/
+
+static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath10k_vdev_start(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_conf *conf = &ar->hw->conf;
+ struct ieee80211_channel *channel = conf->chandef.chan;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ INIT_COMPLETION(ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = channel->center_freq;
+
+ arg.channel.band_center_freq1 = conf->chandef.center_freq1;
+
+ arg.channel.mode = chan_to_phymode(&conf->chandef);
+
+ arg.channel.min_power = channel->max_power * 3;
+ arg.channel.max_power = channel->max_power * 4;
+ arg.channel.max_reg_power = channel->max_reg_power * 4;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ arg.ssid = arvif->vif->bss_conf.ssid;
+ arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+ }
+
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+ if (ret) {
+ ath10k_warn("WMI vdev start failed: ret %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn("vdev setup failed %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ INIT_COMPLETION(ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn("vdev setup failed %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
+{
+ struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
+ struct wmi_vdev_start_request_arg arg = {};
+ enum nl80211_channel_type type;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
+
+ arg.vdev_id = vdev_id;
+ arg.channel.freq = channel->center_freq;
+ arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
+
+	/* TODO: set this up dynamically; what about the case where we
+	 * don't have any vifs? */
+ arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);
+
+ arg.channel.min_power = channel->max_power * 3;
+ arg.channel.max_power = channel->max_power * 4;
+ arg.channel.max_reg_power = channel->max_reg_power * 4;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+ if (ret) {
+ ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn("Monitor vdev setup failed %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath10k_warn("Monitor vdev up failed: %d\n", ret);
+ goto vdev_stop;
+ }
+
+ ar->monitor_vdev_id = vdev_id;
+ ar->monitor_enabled = true;
+
+ return 0;
+
+vdev_stop:
+ ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_monitor_stop(struct ath10k *ar)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+	/* For some reason calling ath10k_wmi_vdev_down() here often causes
+	 * the subsequent ath10k_wmi_vdev_stop() to fail. After that the
+	 * monitor vdev cannot be started again without a driver reload.
+	 * Since we don't see such problems when the call is skipped, we
+	 * don't issue ath10k_wmi_vdev_down() here.
+	 */
+
+ ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret)
+ ath10k_warn("Monitor_down sync failed: %d\n", ret);
+
+ ar->monitor_enabled = false;
+ return ret;
+}
+
+static int ath10k_monitor_create(struct ath10k *ar)
+{
+ int bit, ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->monitor_present) {
+ ath10k_warn("Monitor mode already enabled\n");
+ return 0;
+ }
+
+ bit = ffs(ar->free_vdev_map);
+ if (bit == 0) {
+ ath10k_warn("No free VDEV slots\n");
+ return -ENOMEM;
+ }
+
+ ar->monitor_vdev_id = bit - 1;
+ ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);
+
+ ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
+ WMI_VDEV_TYPE_MONITOR,
+ 0, ar->mac_addr);
+ if (ret) {
+ ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
+ goto vdev_fail;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
+ ar->monitor_vdev_id);
+
+ ar->monitor_present = true;
+ return 0;
+
+vdev_fail:
+ /*
+ * Restore the ID to the global map.
+ */
+ ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
+ return ret;
+}
+
+static int ath10k_monitor_destroy(struct ath10k *ar)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ar->monitor_present)
+ return 0;
+
+ ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
+ return ret;
+ }
+
+ ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
+ ar->monitor_present = false;
+
+ ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
+ ar->monitor_vdev_id);
+ return ret;
+}
+
+static void ath10k_control_beaconing(struct ath10k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ int ret = 0;
+
+ if (!info->enable_beacon) {
+ ath10k_vdev_stop(arvif);
+ return;
+ }
+
+ arvif->tx_seq_no = 0x1000;
+
+ ret = ath10k_vdev_start(arvif);
+ if (ret)
+ return;
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid);
+ if (ret) {
+ ath10k_warn("Failed to bring up VDEV: %d\n",
+ arvif->vdev_id);
+ return;
+ }
+ ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
+}
+
+static void ath10k_control_ibss(struct ath10k_vif *arvif,
+ struct ieee80211_bss_conf *info,
+ const u8 self_peer[ETH_ALEN])
+{
+ int ret = 0;
+
+ if (!info->ibss_joined) {
+ ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
+ if (ret)
+ ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
+ self_peer, arvif->vdev_id, ret);
+
+ if (is_zero_ether_addr(arvif->u.ibss.bssid))
+ return;
+
+ ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
+ arvif->u.ibss.bssid);
+ if (ret) {
+ ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
+ arvif->u.ibss.bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ memset(arvif->u.ibss.bssid, 0, ETH_ALEN);
+
+ return;
+ }
+
+ ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
+ if (ret) {
+ ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n",
+ self_peer, arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ ATH10K_DEFAULT_ATIM);
+ if (ret)
+ ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
+ arvif->vdev_id, ret);
+}
+
+/*
+ * Review this when mac80211 gains per-interface powersave support.
+ */
+static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath10k_generic_iter *ar_iter = data;
+ struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (conf->flags & IEEE80211_CONF_PS) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
+ arvif->vdev_id,
+ param,
+ conf->dynamic_ps_timeout);
+ if (ret) {
+ ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
+ arvif->vdev_id);
+ return;
+ }
+
+ ar_iter->ret = ret;
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
+ psmode);
+ if (ar_iter->ret)
+ ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
+ psmode, arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
+ psmode, arvif->vdev_id);
+}
+
+/**********************/
+/* Station management */
+/**********************/
+
+static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *bss_conf,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ memcpy(arg->addr, sta->addr, ETH_ALEN);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_aid = sta->aid;
+ arg->peer_flags |= WMI_PEER_AUTH;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ /*
+ * The FW seems to have problems with power save in STA
+ * mode when this parameter is set high (e.g. 5). Often the
+ * FW does not send a NULL frame (with clean PS flags) even
+ * when the beacon advertises buffered frames, and it can
+ * take more than 10 seconds before the FW wakes up. Pings
+ * from the AP to our device then often fail (more than 50%).
+ *
+ * Setting this parameter to 1 seems to make the FW check
+ * every beacon and wake up immediately once buffered data
+ * is detected.
+ */
+ arg->peer_listen_intval = 1;
+ else
+ arg->peer_listen_intval = ar->hw->conf.listen_interval;
+
+ arg->peer_num_spatial_streams = 1;
+
+ /*
+ * The assoc capabilities are available only in managed mode.
+ */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
+ arg->peer_caps = bss_conf->assoc_capability;
+}
+
+static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_bss *bss;
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
+ info->bssid, NULL, 0, 0, 0);
+ if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: is keying this off the RSN IE/WPA IE presence actually correct? */
+ if (rsnie || wpaie) {
+ ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
+ arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+ }
+
+ if (wpaie) {
+ ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
+ arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+ }
+}
+
+static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ u32 ratemask;
+ int i;
+
+ sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
+ ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rateset->rates[rateset->num_rates] = rates->hw_value;
+ rateset->num_rates++;
+ }
+}
+
+static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int smps;
+ int i, n;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ arg->peer_flags |= WMI_PEER_HT;
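+ /* Maximum A-MPDU length is 2^(13 + ampdu_factor) - 1 octets (802.11n). */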
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath10k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->peer_flags |= WMI_PEER_LDPC;
+
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->peer_flags |= WMI_PEER_40MHZ;
+ arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
+ arg->peer_flags |= WMI_PEER_STBC;
+ }
+
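+ /* Translate the RX STBC stream count from the HT capability field
+ * into the WMI rate-control flags. */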
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ u32 stbc;
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->peer_flags |= WMI_PEER_STBC;
+ }
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps == WLAN_HT_CAP_SM_PS_STATIC) {
+ arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
+ arg->peer_flags |= WMI_PEER_STATIC_MIMOPS;
+ } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) {
+ arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
+ arg->peer_flags |= WMI_PEER_DYN_MIMOPS;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_RC_DS_FLAG;
+
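+ /* Collect every supported HT MCS index from rx_mask; the spatial
+ * stream count below is estimated as one stream per 8 MCS indices. */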
+ for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
+ if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
+ arg->peer_ht_rates.rates[n++] = i;
+
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_num_spatial_streams = max((n+7) / 8, 1);
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
+ arg->peer_ht_rates.num_rates,
+ arg->peer_num_spatial_streams);
+}
+
+static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *bss_conf,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ u32 uapsd = 0;
+ u32 max_sp = 0;
+
+ if (sta->wme)
+ arg->peer_flags |= WMI_PEER_QOS;
+
+ if (sta->wme && sta->uapsd_queues) {
+ ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
+ arg->peer_flags |= WMI_PEER_APSD;
+ arg->peer_flags |= WMI_RC_UAPSD_FLAG;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_UAPSD,
+ uapsd);
+
+ ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_MAX_SP,
+ max_sp);
+
+ /* TODO: derive this from the STA listen interval and the
+ * beacon interval. mac80211 does not expose
+ * sta->listen_interval yet (a mac80211 patch is required),
+ * so use 10 seconds for now. */
+ ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
+ 10);
+ }
+}
+
+static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *bss_conf,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ if (bss_conf->qos)
+ arg->peer_flags |= WMI_PEER_QOS;
+}
+
+static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ arg->peer_flags |= WMI_PEER_VHT;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->peer_flags |= WMI_PEER_80MHZ;
+
+ arg->peer_vht_rates.rx_max_rate =
+ __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->peer_vht_rates.rx_mcs_set =
+ __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->peer_vht_rates.tx_max_rate =
+ __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+ arg->peer_vht_rates.tx_mcs_set =
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
+}
+
+static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *bss_conf,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ /* FIXME: add VHT */
+
+ switch (ar->hw->conf.chandef.chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ if (sta->ht_cap.ht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else {
+ phymode = MODE_11G;
+ }
+
+ break;
+ case IEEE80211_BAND_5GHZ:
+ if (sta->ht_cap.ht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static int ath10k_peer_assoc(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct wmi_peer_assoc_complete_arg arg;
+
+ memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
+
+ ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
+ ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
+ ath10k_peer_assoc_h_rates(ar, sta, &arg);
+ ath10k_peer_assoc_h_ht(ar, sta, &arg);
+ ath10k_peer_assoc_h_vht(ar, sta, &arg);
+ ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
+ ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
+
+ return ath10k_wmi_peer_assoc(ar, &arg);
+}
+
+/* can be called only in mac80211 callbacks due to `key_count` usage */
+static void ath10k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta *ap_sta;
+ int ret;
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath10k_warn("Failed to find station entry for %pM\n",
+ bss_conf->bssid);
+ rcu_read_unlock();
+ return;
+ }
+
+ ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
+ if (ret) {
+ ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
+ rcu_read_unlock();
+ return;
+ }
+
+ rcu_read_unlock();
+
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
+ bss_conf->bssid);
+ if (ret)
+ ath10k_warn("VDEV: %d up failed: ret %d\n",
+ arvif->vdev_id, ret);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "VDEV: %d associated, BSSID: %pM, AID: %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+}
+
+/*
+ * FIXME: flush TIDs
+ */
+static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret;
+
+ /*
+ * For some reason, calling VDEV-DOWN before VDEV-STOP makes
+ * the FW keep sending frames via HTT after disassociation.
+ * It is not clear why, since VDEV-DOWN is supposed to be
+ * analogous to link down, so just stop the VDEV first.
+ */
+ ret = ath10k_vdev_stop(arvif);
+ if (!ret)
+ ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
+ arvif->vdev_id);
+
+ /*
+ * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
+ * report beacons from previously associated network through HTT.
+ * This in turn would spam mac80211 WARN_ON if we bring down all
+ * interfaces as it expects there is no rx when no interface is
+ * running.
+ */
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
+ arvif->vdev_id, ret);
+
+ ath10k_wmi_flush_tx(ar);
+
+ arvif->def_wep_key_index = 0;
+}
+
+static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ int ret = 0;
+
+ ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
+ if (ret) {
+ ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
+ return ret;
+ }
+
+ ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn("could not install peer wep keys (%d)\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ int ret = 0;
+
+ ret = ath10k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**************/
+/* Regulatory */
+/**************/
+
+static int ath10k_update_channel_list(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_supported_band **bands;
+ enum ieee80211_band band;
+ struct ieee80211_channel *channel;
+ struct wmi_scan_chan_list_arg arg = {0};
+ struct wmi_channel_arg *ch;
+ bool passive;
+ int len;
+ int ret;
+ int i;
+
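+ /* First pass: count the usable channels so the WMI channel list
+ * can be allocated with the right size. */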
+ bands = hw->wiphy->bands;
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ arg.n_channels++;
+ }
+ }
+
+ len = sizeof(struct wmi_channel_arg) * arg.n_channels;
+ arg.channels = kzalloc(len, GFP_KERNEL);
+ if (!arg.channels)
+ return -ENOMEM;
+
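+ /* Second pass: fill in the per-channel parameters for the FW. */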
+ ch = arg.channels;
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ ch->allow_ht = true;
+
+ /* FIXME: when should we really allow VHT? */
+ ch->allow_vht = true;
+
+ ch->allow_ibss =
+ !(channel->flags & IEEE80211_CHAN_NO_IBSS);
+
+ ch->ht40plus =
+ !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
+
+ passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+ ch->passive = passive;
+
+ ch->freq = channel->center_freq;
+ ch->min_power = channel->max_power * 3;
+ ch->max_power = channel->max_power * 4;
+ ch->max_reg_power = channel->max_reg_power * 4;
+ ch->max_antenna_gain = channel->max_antenna_gain;
+ ch->reg_class_id = 0; /* FIXME */
+
+ /* FIXME: why use only legacy modes, why not any
+ * HT/VHT modes? Would that even make any
+ * difference? */
+ if (channel->band == IEEE80211_BAND_2GHZ)
+ ch->mode = MODE_11G;
+ else
+ ch->mode = MODE_11A;
+
+ if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
+ continue;
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ __func__, ch - arg.channels, arg.n_channels,
+ ch->freq, ch->max_power, ch->max_reg_power,
+ ch->max_antenna_gain, ch->mode);
+
+ ch++;
+ }
+ }
+
+ ret = ath10k_wmi_scan_chan_list(ar, &arg);
+ kfree(arg.channels);
+
+ return ret;
+}
+
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct reg_dmn_pair_mapping *regpair;
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+ ret = ath10k_update_channel_list(ar);
+ if (ret)
+ ath10k_warn("could not update channel list (%d)\n", ret);
+
+ regpair = ar->ath_common.regulatory.regpair;
+ /* Target allows setting up per-band regdomain but ath_common provides
+ * a combined one only */
+ ret = ath10k_wmi_pdev_set_regdomain(ar,
+ regpair->regDmnEnum,
+ regpair->regDmnEnum, /* 2ghz */
+ regpair->regDmnEnum, /* 5ghz */
+ regpair->reg_2ghz_ctl,
+ regpair->reg_5ghz_ctl);
+ if (ret)
+ ath10k_warn("could not set pdev regdomain (%d)\n", ret);
+}
+
+/***************/
+/* TX handlers */
+/***************/
+
+/*
+ * Frames sent to the FW have to be in "Native Wifi" format.
+ * Strip the QoS field from the 802.11 header.
+ */
+static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
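+ /* Remove the 2-byte QoS control field by shifting the rest of the
+ * frame down and trimming the skb. */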
+ memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN,
+ skb->len - ieee80211_hdrlen(hdr->frame_control));
+ skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN);
+}
+
+static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ int ret;
+
+ /* TODO AP mode should be implemented */
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ if (!key)
+ return;
+
+ if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ key->cipher != WLAN_CIPHER_SUITE_WEP104)
+ return;
+
+ if (key->keyidx == arvif->def_wep_key_index)
+ return;
+
+ ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_DEF_KEYID,
+ key->keyidx);
+ if (ret) {
+ ath10k_warn("could not update wep keyidx (%d)\n", ret);
+ return;
+ }
+
+ arvif->def_wep_key_index = key->keyidx;
+}
+
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ /* This applies only to the P2P GO case */
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
+ arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return;
+
+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->u.ap.noa_data)
+ if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+ GFP_ATOMIC))
+ memcpy(skb_put(skb, arvif->u.ap.noa_len),
+ arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+ spin_unlock_bh(&ar->data_lock);
+ }
+}
+
+static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int ret;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+ else if (ieee80211_is_nullfunc(hdr->frame_control))
+ /* FW does not report tx status properly for NullFunc frames
+ * unless they are sent through mgmt tx path. mac80211 sends
+ * those frames when it detects link/beacon loss and depends on
+ * the tx status to be correct. */
+ ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+ else
+ ret = ath10k_htt_tx(ar->htt, skb);
+
+ if (ret) {
+ ath10k_warn("tx failed (%d). dropping packet.\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_offchan_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->offchan_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_offchan_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
+ struct ath10k_peer *peer;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ const u8 *peer_addr;
+ int vdev_id;
+ int ret;
+
+ /* FW requirement: We must create a peer before FW will send out
+ * an offchannel frame. Otherwise the frame will be stuck and
+ * never transmitted. We delete the peer upon tx completion.
+ * It is unlikely that a peer for offchannel tx will already be
+ * present. However it may be in some rare cases so account for that.
+ * Otherwise we might remove a legitimate peer and break stuff. */
+
+ for (;;) {
+ skb = skb_dequeue(&ar->offchan_tx_queue);
+ if (!skb)
+ break;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
+ skb);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ peer_addr = ieee80211_get_DA(hdr);
+ vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, vdev_id, peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (peer)
+ ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
+ peer_addr, vdev_id);
+
+ if (!peer) {
+ ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+ if (ret)
+ ath10k_warn("peer %pM on vdev %d not created (%d)\n",
+ peer_addr, vdev_id, ret);
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ INIT_COMPLETION(ar->offchan_tx_completed);
+ ar->offchan_tx_skb = skb;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_tx_htt(ar, skb);
+
+ ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
+ 3 * HZ);
+ if (ret <= 0)
+ ath10k_warn("timed out waiting for offchannel skb %p\n",
+ skb);
+
+ if (!peer) {
+ ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
+ if (ret)
+ ath10k_warn("peer %pM on vdev %d not deleted (%d)\n",
+ peer_addr, vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+}
+
+/************/
+/* Scanning */
+/************/
+
+/*
+ * This gets called if we don't get a heartbeat during a scan.
+ * It may indicate the FW has hung and we need to abort the
+ * scan manually to prevent cancel_hw_scan() from deadlocking.
+ */
+void ath10k_reset_scan(unsigned long ptr)
+{
+ struct ath10k *ar = (struct ath10k *)ptr;
+
+ spin_lock_bh(&ar->data_lock);
+ if (!ar->scan.in_progress) {
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ ath10k_warn("scan timeout. resetting. fw issue?\n");
+
+ if (ar->scan.is_roc)
+ ieee80211_remain_on_channel_expired(ar->hw);
+ else
+ ieee80211_scan_completed(ar->hw, 1 /* aborted */);
+
+ ar->scan.in_progress = false;
+ complete_all(&ar->scan.completed);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_abort_scan(struct ath10k *ar)
+{
+ struct wmi_stop_scan_arg arg = {
+ .req_id = 1, /* FIXME */
+ .req_type = WMI_SCAN_STOP_ONE,
+ .u.scan_id = ATH10K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ del_timer_sync(&ar->scan.timeout);
+
+ spin_lock_bh(&ar->data_lock);
+ if (!ar->scan.in_progress) {
+ spin_unlock_bh(&ar->data_lock);
+ return 0;
+ }
+
+ ar->scan.aborting = true;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_stop_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+ return -EIO;
+ }
+
+ ath10k_wmi_flush_tx(ar);
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+ if (ret == 0)
+ ath10k_warn("timed out while waiting for scan to stop\n");
+
+ /* The scan may complete right after we time out here, so check
+ * in_progress ourselves and tell mac80211 the scan has completed.
+ * Otherwise, if the FW fails to send a scan completion indication,
+ * userspace won't be able to scan anymore. */
+ ret = 0;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.in_progress) {
+ ath10k_warn("could not stop scan. its still in progress\n");
+ ar->scan.in_progress = false;
+ ath10k_offchan_tx_purge(ar);
+ ret = -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static int ath10k_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_start_scan(ar, arg);
+ if (ret)
+ return ret;
+
+ /* make sure we submit the command so the completion
+ * timeout makes sense */
+ ath10k_wmi_flush_tx(ar);
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+ if (ret == 0) {
+ ath10k_abort_scan(ar);
+ return ret;
+ }
+
+ /* The scan can complete earlier, before we even start the timer.
+ * In that case the timer handler checks ar->scan.in_progress and
+ * bails out if it is false. Add a 200ms margin to account for
+ * event/command processing. */
+ mod_timer(&ar->scan.timeout, jiffies +
+ msecs_to_jiffies(arg->max_scan_time+200));
+ return 0;
+}
+
+/**********************/
+/* mac80211 callbacks */
+/**********************/
+
+static void ath10k_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = NULL;
+ u32 vdev_id = 0;
+ u8 tid;
+
+ if (info->control.vif) {
+ arvif = ath10k_vif_to_arvif(info->control.vif);
+ vdev_id = arvif->vdev_id;
+ } else if (ar->monitor_enabled) {
+ vdev_id = ar->monitor_vdev_id;
+ }
+
+ /* mac80211 requests no CCK rates for P2P frames via
+ * IEEE80211_TX_CTL_NO_CCK_RATE; for now we only log it. */
+ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+ /* We must determine the tid before applying the qos workaround,
+ * since the workaround strips the qos control field. */
+ tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+ if (ieee80211_is_data_qos(hdr->frame_control) &&
+ is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+ ath10k_tx_h_qos_workaround(hw, control, skb);
+ ath10k_tx_h_update_wep_key(skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, skb);
+ ath10k_tx_h_seq_no(skb);
+
+ memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
+ ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
+ ATH10K_SKB_CB(skb)->htt.tid = tid;
+
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ spin_lock_bh(&ar->data_lock);
+ ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+ ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+ return;
+ }
+
+ ath10k_tx_htt(ar, skb);
+}
+
+/*
+ * Initialize various parameters with default values.
+ */
+static int ath10k_start(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
+ if (ret)
+ ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
+ ret);
+
+ ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
+ if (ret)
+ ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
+ ret);
+
+ return 0;
+}
+
+static void ath10k_stop(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+
+ /* avoid leaks in case FW never confirms scan for offchannel */
+ cancel_work_sync(&ar->offchan_tx_work);
+ ath10k_offchan_tx_purge(ar);
+}
+
+static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath10k_generic_iter ar_iter;
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+ u32 flags;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
+ conf->chandef.chan->center_freq);
+ spin_lock_bh(&ar->data_lock);
+ ar->rx_channel = conf->chandef.chan;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+ ar_iter.ar = ar;
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ flags,
+ ath10k_ps_iter,
+ &ar_iter);
+
+ ret = ar_iter.ret;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR)
+ ret = ath10k_monitor_create(ar);
+ else
+ ret = ath10k_monitor_destroy(ar);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/*
+ * TODO:
+ * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ * because we will need to send mgmt frames without CCK.
+ * The P2P_FIND/GO_NEG requirement should be handled by
+ * checking the CCK flag in the TX packet.
+ */
+static int ath10k_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ enum wmi_sta_powersave_param param;
+ int ret = 0;
+ u32 value;
+ int bit;
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
+ ath10k_warn("Only one monitor interface allowed\n");
+ ret = -EBUSY;
+ goto exit;
+ }
+
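+ /* ffs() returns a 1-based bit index; 0 means no free vdev slots. */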
+ bit = ffs(ar->free_vdev_map);
+ if (bit == 0) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ arvif->vdev_id = bit - 1;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+ ar->free_vdev_map &= ~(1 << arvif->vdev_id);
+
+ if (ar->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
+ break;
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
+
+ ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
+ arvif->vdev_subtype, vif->addr);
+ if (ret) {
+ ath10k_warn("WMI vdev create failed: ret %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID,
+ arvif->def_wep_key_index);
+ if (ret)
+ ath10k_warn("Failed to set default keyid: %d\n", ret);
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ ATH10K_HW_TXRX_NATIVE_WIFI);
+ if (ret)
+ ath10k_warn("Failed to set TX encap: %d\n", ret);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+ if (ret) {
+ ath10k_warn("Failed to create peer for AP: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret)
+ ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+
+ param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret)
+ ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+
+ param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret)
+ ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ ar->monitor_present = true;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
+
+ ar->free_vdev_map |= 1 << (arvif->vdev_id);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath10k_warn("Failed to remove peer for AP: %d\n", ret);
+
+ kfree(arvif->u.ap.noa_data);
+ }
+
+ ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn("WMI vdev delete failed: %d\n", ret);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ ar->monitor_present = false;
+
+ ath10k_peer_cleanup(ar, arvif->vdev_id);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/*
+ * FIXME: Has to be verified.
+ */
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath10k_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
+ !ar->monitor_enabled) {
+ ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn("Unable to start monitor mode\n");
+ else
+ ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
+ } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
+ ar->monitor_enabled) {
+ ret = ath10k_monitor_stop(ar);
+ if (ret)
+ ath10k_warn("Unable to stop monitor mode\n");
+ else
+ ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_IBSS)
+ ath10k_control_ibss(arvif, info, vif->addr);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ arvif->beacon_interval);
+ if (ret)
+ ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Beacon interval: %d set for VDEV: %d\n",
+ arvif->beacon_interval, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ ret = ath10k_wmi_pdev_set_param(ar,
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_BEACON_STAGGERED_MODE);
+ if (ret)
+ ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Set staggered beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INFO) {
+ arvif->dtim_period = info->dtim_period;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ arvif->dtim_period);
+ if (ret)
+ ath10k_warn("Failed to set dtim period for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Set dtim period: %d for VDEV: %d\n",
+ arvif->dtim_period, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = info->ssid_len;
+ if (info->ssid_len)
+ memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ if (!is_zero_ether_addr(info->bssid)) {
+ ret = ath10k_peer_create(ar, arvif->vdev_id,
+ info->bssid);
+ if (ret)
+ ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
+ info->bssid, arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Added peer: %pM for VDEV: %d\n",
+ info->bssid, arvif->vdev_id);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ /*
+ * This is never erased as we use it for crypto key
+ * clearing; this is a FW requirement.
+ */
+ memcpy(arvif->u.sta.bssid, info->bssid,
+ ETH_ALEN);
+
+ ret = ath10k_vdev_start(arvif);
+ if (!ret)
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "VDEV: %d started with BSSID: %pM\n",
+ arvif->vdev_id, info->bssid);
+ }
+
+ /*
+ * mac80211 does not keep the IBSS bssid when leaving IBSS,
+ * so the driver needs to store it. It is required when
+ * leaving IBSS in order to remove the BSSID peer.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ memcpy(arvif->u.ibss.bssid, info->bssid,
+ ETH_ALEN);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ ath10k_control_beaconing(arvif, info);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ u32 cts_prot;
+ if (info->use_cts_prot)
+ cts_prot = 1;
+ else
+ cts_prot = 0;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ cts_prot);
+ if (ret)
+ ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Set CTS prot: %d for VDEV: %d\n",
+ cts_prot, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ u32 slottime;
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_SLOT_TIME,
+ slottime);
+ if (ret)
+ ath10k_warn("Failed to set erp slot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Set slottime: %d for VDEV: %d\n",
+ slottime, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u32 preamble;
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_PREAMBLE,
+ preamble);
+ if (ret)
+ ath10k_warn("Failed to set preamble for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Set preamble: %d for VDEV: %d\n",
+ preamble, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (info->assoc)
+ ath10k_bss_assoc(hw, vif, info);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_start_scan_arg arg;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.in_progress) {
+ spin_unlock_bh(&ar->data_lock);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ INIT_COMPLETION(ar->scan.started);
+ INIT_COMPLETION(ar->scan.completed);
+ ar->scan.in_progress = true;
+ ar->scan.aborting = false;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ spin_unlock_bh(&ar->data_lock);
+
+ memset(&arg, 0, sizeof(arg));
+ ath10k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH10K_SCAN_ID;
+
+ if (!req->no_cck)
+ arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+
+ if (req->ie_len) {
+ arg.ie_len = req->ie_len;
+ memcpy(arg.ie, req->ie, arg.ie_len);
+ }
+
+ if (req->n_ssids) {
+ arg.n_ssids = req->n_ssids;
+ for (i = 0; i < arg.n_ssids; i++) {
+ arg.ssids[i].len = req->ssids[i].ssid_len;
+ arg.ssids[i].ssid = req->ssids[i].ssid;
+ }
+ }
+
+ if (req->n_channels) {
+ arg.n_channels = req->n_channels;
+ for (i = 0; i < arg.n_channels; i++)
+ arg.channels[i] = req->channels[i]->center_freq;
+ }
+
+ ret = ath10k_start_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn("could not start hw scan (%d)\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.in_progress = false;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath10k_abort_scan(ar);
+ if (ret) {
+ ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
+ ret);
+ ieee80211_scan_completed(hw, 1 /* aborted */);
+ }
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_peer *peer;
+ const u8 *peer_addr;
+ bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104;
+ int ret = 0;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+ /* The peer should not disappear midway (unless the FW goes awry)
+ * since we already hold conf_mutex; just make sure it is there now. */
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath10k_warn("cannot install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable
+ * anymore */
+ goto exit;
+ }
+ }
+
+ if (is_wep) {
+ if (cmd == SET_KEY)
+ arvif->wep_keys[key->keyidx] = key;
+ else
+ arvif->wep_keys[key->keyidx] = NULL;
+
+ if (cmd == DISABLE_KEY)
+ ath10k_clear_vdev_key(arvif, key);
+ }
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr);
+ if (ret) {
+ ath10k_warn("ath10k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY)
+ peer->keys[key->keyidx] = key;
+ else if (peer && cmd == DISABLE_KEY)
+ peer->keys[key->keyidx] = NULL;
+ else if (peer == NULL)
+ /* impossible unless FW goes crazy */
+ ath10k_warn("peer %pM disappeared!\n", peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE &&
+ vif->type != NL80211_IFTYPE_STATION) {
+ /*
+ * New station addition.
+ */
+ ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ /*
+ * Existing station deletion.
+ */
+ ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ath10k_bss_disassoc(hw, vif);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ /*
+ * New association.
+ */
+ ret = ath10k_station_assoc(ar, arvif, sta);
+ if (ret)
+ ath10k_warn("Failed to associate station: %pM\n",
+ sta->addr);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Station %pM moved to assoc state\n",
+ sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ /*
+ * Disassociation.
+ */
+ ret = ath10k_station_disassoc(ar, arvif, sta);
+ if (ret)
+ ath10k_warn("Failed to disassociate station: %pM\n",
+ sta->addr);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Station %pM moved to disassociated state\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ u32 value = 0;
+ int ret = 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath10k_warn("could not set uapsd params %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath10k_warn("could not set rx wake param %d\n", ret);
+
+exit:
+ return ret;
+}
+
+static int ath10k_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath10k *ar = hw->priv;
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &ar->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &ar->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &ar->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &ar->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+
+ /*
+ * The channel time duration programmed in the HW is in absolute
+ * microseconds, while mac80211 gives the txop in units of
+ * 32 microseconds.
+ */
+ p->txop = params->txop * 32;
+
+ /* FIXME: FW accepts wmm params per hw, not per vif */
+ ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
+ if (ret) {
+ ath10k_warn("could not set wmm params %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+ if (ret)
+ ath10k_warn("could not set sta uapsd %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+
+static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_start_scan_arg arg;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.in_progress) {
+ spin_unlock_bh(&ar->data_lock);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ INIT_COMPLETION(ar->scan.started);
+ INIT_COMPLETION(ar->scan.completed);
+ INIT_COMPLETION(ar->scan.on_channel);
+ ar->scan.in_progress = true;
+ ar->scan.aborting = false;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ spin_unlock_bh(&ar->data_lock);
+
+ memset(&arg, 0, sizeof(arg));
+ ath10k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH10K_SCAN_ID;
+ arg.n_channels = 1;
+ arg.channels[0] = chan->center_freq;
+ arg.dwell_time_active = duration;
+ arg.dwell_time_passive = duration;
+ arg.max_scan_time = 2 * duration;
+ arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+
+ ret = ath10k_start_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn("could not start roc scan (%d)\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.in_progress = false;
+ spin_unlock_bh(&ar->data_lock);
+ goto exit;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+ if (ret == 0) {
+ ath10k_warn("could not switch to channel for roc scan\n");
+ ath10k_abort_scan(ar);
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_abort_scan(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+/*
+ * Both the RTS and fragmentation thresholds are per-interface
+ * in ath10k, but per-device in mac80211.
+ */
+static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath10k_generic_iter *ar_iter = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
+
+ rts = min_t(u32, rts, ATH10K_RTS_MAX);
+
+ ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_RTS_THRESHOLD,
+ rts);
+ if (ar_iter->ret)
+ ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Set RTS threshold: %d for VDEV: %d\n",
+ rts, arvif->vdev_id);
+}
+
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath10k_generic_iter ar_iter;
+ struct ath10k *ar = hw->priv;
+
+ memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+ ar_iter.ar = ar;
+
+ mutex_lock(&ar->conf_mutex);
+ ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_set_rts_iter, &ar_iter);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ar_iter.ret;
+}
+
+static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath10k_generic_iter *ar_iter = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
+ int ret;
+
+ frag = clamp_t(u32, frag,
+ ATH10K_FRAGMT_THRESHOLD_MIN,
+ ATH10K_FRAGMT_THRESHOLD_MAX);
+
+ ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ frag);
+
+ ar_iter->ret = ret;
+ if (ar_iter->ret)
+ ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "Set frag threshold: %d for VDEV: %d\n",
+ frag, arvif->vdev_id);
+}
+
+static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath10k_generic_iter ar_iter;
+ struct ath10k *ar = hw->priv;
+
+ memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+ ar_iter.ar = ar;
+
+ mutex_lock(&ar->conf_mutex);
+ ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_set_frag_iter, &ar_iter);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ar_iter.ret;
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ /* mac80211 doesn't care whether we actually transmit the queued
+ * frames; we'll collect them either way if we stop/delete vdevs */
+ if (drop)
+ return;
+
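+ /* Wait until the HTT MSDU id bitmap drains, i.e. all pending tx
+ * frames have completed. */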
+ ret = wait_event_timeout(ar->htt->empty_tx_wq, ({
+ bool empty;
+ spin_lock_bh(&ar->htt->tx_lock);
+ empty = bitmap_empty(ar->htt->used_msdu_ids,
+ ar->htt->max_num_pending_tx);
+ spin_unlock_bh(&ar->htt->tx_lock);
+ (empty);
+ }), ATH10K_FLUSH_TIMEOUT_HZ);
+ if (ret <= 0)
+ ath10k_warn("tx not flushed\n");
+}
+
+/* TODO: Implement this function properly.
+ * For now it is needed to reply to Probe Requests in IBSS mode.
+ * Probably we need this information from the FW.
+ */
+static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ return 1;
+}
+
+static const struct ieee80211_ops ath10k_ops = {
+ .tx = ath10k_tx,
+ .start = ath10k_start,
+ .stop = ath10k_stop,
+ .config = ath10k_config,
+ .add_interface = ath10k_add_interface,
+ .remove_interface = ath10k_remove_interface,
+ .configure_filter = ath10k_configure_filter,
+ .bss_info_changed = ath10k_bss_info_changed,
+ .hw_scan = ath10k_hw_scan,
+ .cancel_hw_scan = ath10k_cancel_hw_scan,
+ .set_key = ath10k_set_key,
+ .sta_state = ath10k_sta_state,
+ .conf_tx = ath10k_conf_tx,
+ .remain_on_channel = ath10k_remain_on_channel,
+ .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
+ .set_rts_threshold = ath10k_set_rts_threshold,
+ .set_frag_threshold = ath10k_set_frag_threshold,
+ .flush = ath10k_flush,
+ .tx_last_beacon = ath10k_tx_last_beacon,
+};
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .flags = (_flags), \
+ .hw_value = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel ath10k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath10k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+};
+
+static struct ieee80211_rate ath10k_rates[] = {
+ /* CCK */
+ RATETAB_ENT(10, 0x82, 0),
+ RATETAB_ENT(20, 0x84, 0),
+ RATETAB_ENT(55, 0x8b, 0),
+ RATETAB_ENT(110, 0x96, 0),
+ /* OFDM */
+ RATETAB_ENT(60, 0x0c, 0),
+ RATETAB_ENT(90, 0x12, 0),
+ RATETAB_ENT(120, 0x18, 0),
+ RATETAB_ENT(180, 0x24, 0),
+ RATETAB_ENT(240, 0x30, 0),
+ RATETAB_ENT(360, 0x48, 0),
+ RATETAB_ENT(480, 0x60, 0),
+ RATETAB_ENT(540, 0x6c, 0),
+};
+
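+/* 5 GHz uses only the OFDM entries, so skip the first four (CCK)
+ * rates; 2.4 GHz uses the whole table. */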
+#define ath10k_a_rates (ath10k_rates + 4)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+struct ath10k *ath10k_mac_create(void)
+{
+ struct ieee80211_hw *hw;
+ struct ath10k *ar;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops);
+ if (!hw)
+ return NULL;
+
+ ar = hw->priv;
+ ar->hw = hw;
+
+ return ar;
+}
+
+void ath10k_mac_destroy(struct ath10k *ar)
+{
+ ieee80211_free_hw(ar->hw);
+}
+
+static const struct ieee80211_iface_limit ath10k_if_limits[] = {
+ {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_P2P_CLIENT)
+ | BIT(NL80211_IFTYPE_P2P_GO)
+ | BIT(NL80211_IFTYPE_AP)
+ }
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb = {
+ .limits = ath10k_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+};
+
+static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {0};
+ u16 mcs_map;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->vht_cap_info;
+
+ /* FIXME: check dynamically how many streams board supports */
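+ /* The VHT MCS map uses 2 bits per spatial stream: advertise
+ * MCS 0-9 for the first three streams and mark the rest
+ * unsupported. */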
+ mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+
+ return vht_cap;
+}
+
+static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {0};
+
+ if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
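+ /* Re-encode the RX STBC stream count from the WMI capability
+ * field into the HT capability bits. */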
+ if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
+ stbc = ar->ht_cap_info;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ /* max AMSDU is implicitly taken from vht_cap_info */
+ if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++)
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static void ath10k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif_iter *arvif_iter = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
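+/* Look up the driver vif (arvif) matching the given firmware vdev id. */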
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_vif_iter arvif_iter;
+ u32 flags;
+
+ memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath10k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif) {
+ ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
+ return NULL;
+ }
+
+ return arvif_iter.arvif;
+}
+
+int ath10k_mac_register(struct ath10k *ar)
+{
+ struct ieee80211_supported_band *band;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_ht_cap ht_cap;
+ void *channels;
+ int ret;
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ar->dev);
+
+ ht_cap = ath10k_get_ht_cap(ar);
+ vht_cap = ath10k_create_vht_cap(ar);
+
+ if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+ channels = kmemdup(ath10k_2ghz_channels,
+ sizeof(ath10k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+ band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath10k_g_rates_size;
+ band->bitrates = ath10k_g_rates;
+ band->ht_cap = ht_cap;
+
+ /* VHT is not supported in the 2.4 GHz band */
+
+ ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ }
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+ channels = kmemdup(ath10k_5ghz_channels,
+ sizeof(ath10k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+ band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+ kfree(band->channels);
+ }
+ return -ENOMEM;
+ }
+
+ band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+ band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath10k_a_rates_size;
+ band->bitrates = ath10k_a_rates;
+ band->ht_cap = ht_cap;
+ band->vht_cap = vht_cap;
+ ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ }
+
+ ar->hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+ ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+ IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+ IEEE80211_HW_WANT_MONITOR_VIF |
+ IEEE80211_HW_AP_LINK_PS;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
+ ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
+ }
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+
+ ar->hw->channel_change_time = 5000;
+ ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ /*
+ * On LL hardware the queues are managed entirely by the firmware,
+ * so all we do here is advertise the queues to mac80211.
+ */
+ ar->hw->queues = 4;
+
+ ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
+ ar->hw->wiphy->n_iface_combinations = 1;
+
+ ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
+ ath10k_reg_notifier);
+ if (ret) {
+ ath10k_err("Regulatory initialization failed\n");
+ return ret;
+ }
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath10k_err("ieee80211 registration failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
+ ret = regulatory_hint(ar->hw->wiphy,
+ ar->ath_common.regulatory.alpha2);
+ if (ret)
+ goto exit;
+ }
+
+ return 0;
+exit:
+ ieee80211_unregister_hw(ar->hw);
+ return ret;
+}
+
+void ath10k_mac_unregister(struct ath10k *ar)
+{
+ ieee80211_unregister_hw(ar->hw);
+
+ kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
new file mode 100644
index 000000000000..27fc92e58829
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MAC_H_
+#define _MAC_H_
+
+#include <net/mac80211.h>
+#include "core.h"
+
+struct ath10k_generic_iter {
+ struct ath10k *ar;
+ int ret;
+};
+
+struct ath10k *ath10k_mac_create(void);
+void ath10k_mac_destroy(struct ath10k *ar);
+int ath10k_mac_register(struct ath10k *ar);
+void ath10k_mac_unregister(struct ath10k *ar);
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
+void ath10k_reset_scan(unsigned long ptr);
+void ath10k_offchan_tx_purge(struct ath10k *ar);
+void ath10k_offchan_tx_work(struct work_struct *work);
+
+static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+ return (struct ath10k_vif *)vif->drv_priv;
+}
+
+static inline void ath10k_tx_h_seq_no(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (arvif->tx_seq_no == 0)
+ arvif->tx_seq_no = 0x1000;
+
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ arvif->tx_seq_no += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
+ }
+}
+
+#endif /* _MAC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
new file mode 100644
index 000000000000..33af4672c909
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -0,0 +1,2507 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
+#include "debug.h"
+
+#include "targaddrs.h"
+#include "bmi.h"
+
+#include "hif.h"
+#include "htc.h"
+
+#include "ce.h"
+#include "pci.h"
+
+unsigned int ath10k_target_ps;
+module_param(ath10k_target_ps, uint, 0644);
+MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+
+#define QCA988X_1_0_DEVICE_ID (0xabcd)
+#define QCA988X_2_0_DEVICE_ID (0x003c)
+
+static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
+ { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
+ { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+ {0}
+};
+
+static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
+ u32 *data);
+
+static void ath10k_pci_process_ce(struct ath10k *ar);
+static int ath10k_pci_post_rx(struct ath10k *ar);
+static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+ int num);
+static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_stop_ce(struct ath10k *ar);
+
+static const struct ce_attr host_ce_config_wlan[] = {
+ /* host->target HTC control and raw streams */
+ { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
+ /* could be moved to share CE3 */
+ /* target->host HTT + HTC control */
+ { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
+ /* target->host WMI */
+ { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
+ /* host->target WMI */
+ { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
+ /* host->target HTT */
+ { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
+ CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
+ /* unused */
+ { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
+ /* Target autonomous hif_memcpy */
+ { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
+ /* ce_diag, the Diagnostic Window */
+ { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+};
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config target_ce_config_wlan[] = {
+ /* host->target HTC control and raw streams */
+ { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
+ /* target->host HTT + HTC control */
+ { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
+ /* target->host WMI */
+ { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
+ /* host->target WMI */
+ { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
+ /* host->target HTT */
+ { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+ /* NB: 50% of src nentries, since tx has 2 frags */
+ /* unused */
+ { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
+ /* Reserved for target autonomous hif_memcpy */
+ { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+ /* CE7 used only by Host */
+};
+
+/*
+ * Diagnostic read/write access is provided for startup/config/debug usage.
+ * Caller must guarantee proper alignment, when applicable, and that there
+ * is only a single user at any given moment.
+ */
+static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
+ int nbytes)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret = 0;
+ u32 buf;
+ unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+ unsigned int id;
+ unsigned int flags;
+ struct ce_state *ce_diag;
+ /* Host buffer address in CE space */
+ u32 ce_data;
+ dma_addr_t ce_data_base = 0;
+ void *data_buf = NULL;
+ int i;
+
+ /*
+ * This code cannot handle reads to non-memory space. Redirect to the
+ * register read function, but preserve this function's multi-word read
+ * capability.
+ */
+ if (address < DRAM_BASE_ADDRESS) {
+ if (!IS_ALIGNED(address, 4) ||
+ !IS_ALIGNED((unsigned long)data, 4))
+ return -EIO;
+
+ while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
+ ar, address, (u32 *)data)) == 0)) {
+ nbytes -= sizeof(u32);
+ address += sizeof(u32);
+ data += sizeof(u32);
+ }
+ return ret;
+ }
+
+ ce_diag = ar_pci->ce_diag;
+
+ /*
+ * Allocate a temporary bounce buffer to hold caller's data
+ * to be DMA'ed from Target. This guarantees
+ * 1) 4-byte alignment
+ * 2) Buffer in DMA-able space
+ */
+ orig_nbytes = nbytes;
+ data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
+ orig_nbytes,
+ &ce_data_base);
+
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ memset(data_buf, 0, orig_nbytes);
+
+ remaining_bytes = orig_nbytes;
+ ce_data = ce_data_base;
+ while (remaining_bytes) {
+ nbytes = min_t(unsigned int, remaining_bytes,
+ DIAG_TRANSFER_LIMIT);
+
+ ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
+ if (ret != 0)
+ goto done;
+
+ /* Request CE to send from Target(!) address to Host buffer */
+ /*
+ * The address supplied by the caller is in the
+ * Target CPU virtual address space.
+ *
+ * In order to use this address with the diagnostic CE,
+ * convert it from Target CPU virtual address space
+ * to CE address space
+ */
+ ath10k_pci_wake(ar);
+ address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
+ address);
+ ath10k_pci_sleep(ar);
+
+ ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
+ 0);
+ if (ret)
+ goto done;
+
+ i = 0;
+ while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id) != 0) {
+ mdelay(1);
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (buf != (u32) address) {
+ ret = -EIO;
+ goto done;
+ }
+
+ i = 0;
+ while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id, &flags) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (buf != ce_data) {
+ ret = -EIO;
+ goto done;
+ }
+
+ remaining_bytes -= nbytes;
+ address += nbytes;
+ ce_data += nbytes;
+ }
+
+done:
+ if (ret == 0) {
+ /* Copy data from allocated DMA buf to caller's buf */
+ WARN_ON_ONCE(orig_nbytes & 3);
+ for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
+ ((u32 *)data)[i] =
+ __le32_to_cpu(((__le32 *)data_buf)[i]);
+ }
+ } else
+ ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
+ __func__, address);
+
+ if (data_buf)
+ pci_free_consistent(ar_pci->pdev, orig_nbytes,
+ data_buf, ce_data_base);
+
+ return ret;
+}
+
+/* Read 4-byte aligned data from Target memory or register */
+static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
+ u32 *data)
+{
+ /* Assume range doesn't cross this boundary */
+ if (address >= DRAM_BASE_ADDRESS)
+ return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
+
+ ath10k_pci_wake(ar);
+ *data = ath10k_pci_read32(ar, address);
+ ath10k_pci_sleep(ar);
+ return 0;
+}
+
+static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret = 0;
+ u32 buf;
+ unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+ unsigned int id;
+ unsigned int flags;
+ struct ce_state *ce_diag;
+ void *data_buf = NULL;
+ u32 ce_data; /* Host buffer address in CE space */
+ dma_addr_t ce_data_base = 0;
+ int i;
+
+ ce_diag = ar_pci->ce_diag;
+
+ /*
+ * Allocate a temporary bounce buffer to hold caller's data
+ * to be DMA'ed to Target. This guarantees
+ * 1) 4-byte alignment
+ * 2) Buffer in DMA-able space
+ */
+ orig_nbytes = nbytes;
+ data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
+ orig_nbytes,
+ &ce_data_base);
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* Copy caller's data to allocated DMA buf */
+ WARN_ON_ONCE(orig_nbytes & 3);
+ for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
+ ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
+
+ /*
+ * The address supplied by the caller is in the
+ * Target CPU virtual address space.
+ *
+ * In order to use this address with the diagnostic CE,
+ * convert it from
+ * Target CPU virtual address space
+ * to
+ * CE address space
+ */
+ ath10k_pci_wake(ar);
+ address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+ ath10k_pci_sleep(ar);
+
+ remaining_bytes = orig_nbytes;
+ ce_data = ce_data_base;
+ while (remaining_bytes) {
+ /* FIXME: check cast */
+ nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+
+ /* Set up to receive directly into Target(!) address */
+ ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
+ if (ret != 0)
+ goto done;
+
+ /*
+ * Request CE to send caller-supplied data that
+ * was copied to bounce buffer to Target(!) address.
+ */
+ ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
+ nbytes, 0, 0);
+ if (ret != 0)
+ goto done;
+
+ i = 0;
+ while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (buf != ce_data) {
+ ret = -EIO;
+ goto done;
+ }
+
+ i = 0;
+ while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id, &flags) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (buf != address) {
+ ret = -EIO;
+ goto done;
+ }
+
+ remaining_bytes -= nbytes;
+ address += nbytes;
+ ce_data += nbytes;
+ }
+
+done:
+ if (data_buf) {
+ pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
+ ce_data_base);
+ }
+
+ if (ret != 0)
+ ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
+ address);
+
+ return ret;
+}
+
+/* Write 4B data to Target memory or register */
+static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
+ u32 data)
+{
+ /* Assume range doesn't cross this boundary */
+ if (address >= DRAM_BASE_ADDRESS)
+ return ath10k_pci_diag_write_mem(ar, address, &data,
+ sizeof(u32));
+
+ ath10k_pci_wake(ar);
+ ath10k_pci_write32(ar, address, data);
+ ath10k_pci_sleep(ar);
+ return 0;
+}
+
+static bool ath10k_pci_target_is_awake(struct ath10k *ar)
+{
+ void __iomem *mem = ath10k_pci_priv(ar)->mem;
+ u32 val;
+ val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
+ RTC_STATE_ADDRESS);
+ return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
+}
+
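+/* Poll for up to ~1 second waiting for the target to report it is awake. */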
+static void ath10k_pci_wait(struct ath10k *ar)
+{
+ int n = 100;
+
+ while (n-- && !ath10k_pci_target_is_awake(ar))
+ msleep(10);
+
+ if (n < 0)
+ ath10k_warn("Unable to wakeup target\n");
+}
+
+void ath10k_do_pci_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ void __iomem *pci_addr = ar_pci->mem;
+ int tot_delay = 0;
+ int curr_delay = 5;
+
+ if (atomic_read(&ar_pci->keep_awake_count) == 0) {
+ /* Force AWAKE */
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ pci_addr + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ }
+ atomic_inc(&ar_pci->keep_awake_count);
+
+ if (ar_pci->verified_awake)
+ return;
+
+ for (;;) {
+ if (ath10k_pci_target_is_awake(ar)) {
+ ar_pci->verified_awake = true;
+ break;
+ }
+
+ if (tot_delay > PCIE_WAKE_TIMEOUT) {
+ ath10k_warn("target takes too long to wake up (awake count %d)\n",
+ atomic_read(&ar_pci->keep_awake_count));
+ break;
+ }
+
+ udelay(curr_delay);
+ tot_delay += curr_delay;
+
+ if (curr_delay < 50)
+ curr_delay += 5;
+ }
+}
+
+void ath10k_do_pci_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ void __iomem *pci_addr = ar_pci->mem;
+
+ if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
+ /* Allow sleep */
+ ar_pci->verified_awake = false;
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ pci_addr + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ }
+}
+
+/*
+ * FIXME: Handle OOM properly.
+ */
+static inline
+struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+{
+ struct ath10k_pci_compl *compl = NULL;
+
+ spin_lock_bh(&pipe_info->pipe_lock);
+ if (list_empty(&pipe_info->compl_free)) {
+ ath10k_warn("Completion buffers are full\n");
+ goto exit;
+ }
+ compl = list_first_entry(&pipe_info->compl_free,
+ struct ath10k_pci_compl, list);
+ list_del(&compl->list);
+exit:
+ spin_unlock_bh(&pipe_info->pipe_lock);
+ return compl;
+}
+
+/* Called by lower (CE) layer when a send to Target completes. */
+static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
+ void *transfer_context,
+ u32 ce_data,
+ unsigned int nbytes,
+ unsigned int transfer_id)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_compl *compl;
+ bool process = false;
+
+ do {
+ /*
+ * For the send completion of an item in sendlist, just
+ * increment num_sends_allowed. The upper layer callback will
+ * be triggered when last fragment is done with send.
+ */
+ if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
+ spin_lock_bh(&pipe_info->pipe_lock);
+ pipe_info->num_sends_allowed++;
+ spin_unlock_bh(&pipe_info->pipe_lock);
+ continue;
+ }
+
+ compl = get_free_compl(pipe_info);
+ if (!compl)
+ break;
+
+ compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+ compl->ce_state = ce_state;
+ compl->pipe_info = pipe_info;
+ compl->transfer_context = transfer_context;
+ compl->nbytes = nbytes;
+ compl->transfer_id = transfer_id;
+ compl->flags = 0;
+
+ /*
+ * Add the completion to the processing queue.
+ */
+ spin_lock_bh(&ar_pci->compl_lock);
+ list_add_tail(&compl->list, &ar_pci->compl_process);
+ spin_unlock_bh(&ar_pci->compl_lock);
+
+ process = true;
+ } while (ath10k_ce_completed_send_next(ce_state,
+ &transfer_context,
+ &ce_data, &nbytes,
+ &transfer_id) == 0);
+
+ /*
+ * If only some of the items within a sendlist have completed,
+ * don't invoke completion processing until the entire sendlist
+ * has been sent.
+ */
+ if (!process)
+ return;
+
+ ath10k_pci_process_ce(ar);
+}
+
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
+ void *transfer_context, u32 ce_data,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_compl *compl;
+ struct sk_buff *skb;
+
+ do {
+ compl = get_free_compl(pipe_info);
+ if (!compl)
+ break;
+
+ compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+ compl->ce_state = ce_state;
+ compl->pipe_info = pipe_info;
+ compl->transfer_context = transfer_context;
+ compl->nbytes = nbytes;
+ compl->transfer_id = transfer_id;
+ compl->flags = flags;
+
+ skb = transfer_context;
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ /*
+ * Add the completion to the processing queue.
+ */
+ spin_lock_bh(&ar_pci->compl_lock);
+ list_add_tail(&compl->list, &ar_pci->compl_process);
+ spin_unlock_bh(&ar_pci->compl_lock);
+
+ } while (ath10k_ce_completed_recv_next(ce_state,
+ &transfer_context,
+ &ce_data, &nbytes,
+ &transfer_id,
+ &flags) == 0);
+
+ ath10k_pci_process_ce(ar);
+}
+
+/* Send the first nbytes bytes of the buffer */
+static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
+ unsigned int transfer_id,
+ unsigned int bytes, struct sk_buff *nbuf)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+ struct ce_state *ce_hdl = pipe_info->ce_hdl;
+ struct ce_sendlist sendlist;
+ unsigned int len;
+ u32 flags = 0;
+ int ret;
+
+ memset(&sendlist, 0, sizeof(struct ce_sendlist));
+
+ len = min(bytes, nbuf->len);
+ bytes -= len;
+
+ if (len & 3)
+ ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
+
+ ath10k_dbg(ATH10K_DBG_PCI,
+ "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
+ nbuf->data, (unsigned long long) skb_cb->paddr,
+ nbuf->len, len);
+ ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
+ "ath10k tx: data: ",
+ nbuf->data, nbuf->len);
+
+ ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
+
+ /* Make sure we have resources to handle this request */
+ spin_lock_bh(&pipe_info->pipe_lock);
+ if (!pipe_info->num_sends_allowed) {
+ ath10k_warn("Pipe: %d is full\n", pipe_id);
+ spin_unlock_bh(&pipe_info->pipe_lock);
+ return -ENOSR;
+ }
+ pipe_info->num_sends_allowed--;
+ spin_unlock_bh(&pipe_info->pipe_lock);
+
+ ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+ if (ret)
+ ath10k_warn("CE send failed: %p\n", nbuf);
+
+ return ret;
+}
+
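+/* Return how many sends the given pipe can currently accept. */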
+static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
+ int ret;
+
+ spin_lock_bh(&pipe_info->pipe_lock);
+ ret = pipe_info->num_sends_allowed;
+ spin_unlock_bh(&pipe_info->pipe_lock);
+
+ return ret;
+}
+
+static void ath10k_pci_hif_dump_area(struct ath10k *ar)
+{
+ u32 reg_dump_area = 0;
+ u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+ u32 host_addr;
+ int ret;
+ u32 i;
+
+ ath10k_err("firmware crashed!\n");
+ ath10k_err("hardware name %s version 0x%x\n",
+ ar->hw_params.name, ar->target_version);
+ ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
+ ar->fw_version_minor, ar->fw_version_release,
+ ar->fw_version_build);
+
+ host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
+ if (ath10k_pci_diag_read_mem(ar, host_addr,
+ &reg_dump_area, sizeof(u32)) != 0) {
+ ath10k_warn("could not read hi_failure_state\n");
+ return;
+ }
+
+ ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
+
+ ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
+ &reg_dump_values[0],
+ REG_DUMP_COUNT_QCA988X * sizeof(u32));
+ if (ret != 0) {
+ ath10k_err("could not dump FW Dump Area\n");
+ return;
+ }
+
+ BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
+
+ ath10k_err("target Register Dump\n");
+ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
+ ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i,
+ reg_dump_values[i],
+ reg_dump_values[i + 1],
+ reg_dump_values[i + 2],
+ reg_dump_values[i + 3]);
+}
+
+static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
+{
+ if (!force) {
+ int resources;
+ /*
+ * Decide whether to actually poll for completions, or just
+ * wait for a later chance.
+ * If there seem to be plenty of resources left, then just wait
+ * since checking involves reading a CE register, which is a
+ * relatively expensive operation.
+ */
+ resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
+
+ /*
+ * If at least 50% of the total resources are still available,
+ * don't bother checking again yet.
+ */
+ if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+ return;
+ }
+ ath10k_ce_per_engine_service(ar, pipe);
+}
+
+static void ath10k_pci_hif_post_init(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+ memcpy(&ar_pci->msg_callbacks_current, callbacks,
+ sizeof(ar_pci->msg_callbacks_current));
+}
+
+static int ath10k_pci_start_ce(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ce_state *ce_diag = ar_pci->ce_diag;
+ const struct ce_attr *attr;
+ struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_compl *compl;
+ int i, pipe_num, completions, disable_interrupts;
+
+ spin_lock_init(&ar_pci->compl_lock);
+ INIT_LIST_HEAD(&ar_pci->compl_process);
+
+ for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+
+ spin_lock_init(&pipe_info->pipe_lock);
+ INIT_LIST_HEAD(&pipe_info->compl_free);
+
+ /* Handle Diagnostic CE specially */
+ if (pipe_info->ce_hdl == ce_diag)
+ continue;
+
+ attr = &host_ce_config_wlan[pipe_num];
+ completions = 0;
+
+ if (attr->src_nentries) {
+ disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
+ ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+ ath10k_pci_ce_send_done,
+ disable_interrupts);
+ completions += attr->src_nentries;
+ pipe_info->num_sends_allowed = attr->src_nentries - 1;
+ }
+
+ if (attr->dest_nentries) {
+ ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+ ath10k_pci_ce_recv_data);
+ completions += attr->dest_nentries;
+ }
+
+ if (completions == 0)
+ continue;
+
+ for (i = 0; i < completions; i++) {
+ compl = kmalloc(sizeof(struct ath10k_pci_compl),
+ GFP_KERNEL);
+ if (!compl) {
+ ath10k_warn("No memory for completion state\n");
+ ath10k_pci_stop_ce(ar);
+ return -ENOMEM;
+ }
+
+ compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+ list_add_tail(&compl->list, &pipe_info->compl_free);
+ }
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_stop_ce(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_compl *compl;
+ struct sk_buff *skb;
+ int i;
+
+ ath10k_ce_disable_interrupts(ar);
+
+ /* Cancel the pending tasklet */
+ tasklet_kill(&ar_pci->intr_tq);
+
+ for (i = 0; i < CE_COUNT; i++)
+ tasklet_kill(&ar_pci->pipe_info[i].intr);
+
+ /* Mark pending completions as aborted, so that upper layers free up
+ * their associated resources */
+ spin_lock_bh(&ar_pci->compl_lock);
+ list_for_each_entry(compl, &ar_pci->compl_process, list) {
+ skb = (struct sk_buff *)compl->transfer_context;
+ ATH10K_SKB_CB(skb)->is_aborted = true;
+ }
+ spin_unlock_bh(&ar_pci->compl_lock);
+}
+
+static void ath10k_pci_cleanup_ce(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_compl *compl, *tmp;
+ struct hif_ce_pipe_info *pipe_info;
+ struct sk_buff *netbuf;
+ int pipe_num;
+
+ /* Free pending completions. */
+ spin_lock_bh(&ar_pci->compl_lock);
+ if (!list_empty(&ar_pci->compl_process))
+ ath10k_warn("pending completions still present! possible memory leaks.\n");
+
+ list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
+ list_del(&compl->list);
+ netbuf = (struct sk_buff *)compl->transfer_context;
+ dev_kfree_skb_any(netbuf);
+ kfree(compl);
+ }
+ spin_unlock_bh(&ar_pci->compl_lock);
+
+ /* Free unused completions for each pipe. */
+ for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+
+ spin_lock_bh(&pipe_info->pipe_lock);
+ list_for_each_entry_safe(compl, tmp,
+ &pipe_info->compl_free, list) {
+ list_del(&compl->list);
+ kfree(compl);
+ }
+ spin_unlock_bh(&pipe_info->pipe_lock);
+ }
+}
+
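+/*
+ * Drain the completion queue, invoking the registered HIF tx/rx callbacks
+ * for each entry and returning completions to their pipe's free list.
+ */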
+static void ath10k_pci_process_ce(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ar->hif.priv;
+ struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
+ struct ath10k_pci_compl *compl;
+ struct sk_buff *skb;
+ unsigned int nbytes;
+ int ret, send_done = 0;
+
+ /* Upper layers aren't ready to handle tx/rx completions in parallel so
+ * we must serialize all completion processing. */
+
+ spin_lock_bh(&ar_pci->compl_lock);
+ if (ar_pci->compl_processing) {
+ spin_unlock_bh(&ar_pci->compl_lock);
+ return;
+ }
+ ar_pci->compl_processing = true;
+ spin_unlock_bh(&ar_pci->compl_lock);
+
+ for (;;) {
+ spin_lock_bh(&ar_pci->compl_lock);
+ if (list_empty(&ar_pci->compl_process)) {
+ spin_unlock_bh(&ar_pci->compl_lock);
+ break;
+ }
+ compl = list_first_entry(&ar_pci->compl_process,
+ struct ath10k_pci_compl, list);
+ list_del(&compl->list);
+ spin_unlock_bh(&ar_pci->compl_lock);
+
+ if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+ cb->tx_completion(ar,
+ compl->transfer_context,
+ compl->transfer_id);
+ send_done = 1;
+ } else {
+ ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
+ if (ret) {
+ ath10k_warn("Unable to post recv buffer for pipe: %d\n",
+ compl->pipe_info->pipe_num);
+ break;
+ }
+
+ skb = (struct sk_buff *)compl->transfer_context;
+ nbytes = compl->nbytes;
+
+ ath10k_dbg(ATH10K_DBG_PCI,
+ "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
+ skb, nbytes);
+ ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
+ "ath10k rx: ", skb->data, nbytes);
+
+ if (skb->len + skb_tailroom(skb) >= nbytes) {
+ skb_trim(skb, 0);
+ skb_put(skb, nbytes);
+ cb->rx_completion(ar, skb,
+ compl->pipe_info->pipe_num);
+ } else {
+ ath10k_warn("rxed more than expected (nbytes %d, max %d)",
+ nbytes,
+ skb->len + skb_tailroom(skb));
+ }
+ }
+
+ compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+
+ /*
+ * Add completion back to the pipe's free list.
+ */
+ spin_lock_bh(&compl->pipe_info->pipe_lock);
+ list_add_tail(&compl->list, &compl->pipe_info->compl_free);
+ compl->pipe_info->num_sends_allowed += send_done;
+ spin_unlock_bh(&compl->pipe_info->pipe_lock);
+ }
+
+ spin_lock_bh(&ar_pci->compl_lock);
+ ar_pci->compl_processing = false;
+ spin_unlock_bh(&ar_pci->compl_lock);
+}
+
+/* TODO - temporary mapping while we have too few CE's */
+static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id, u8 *ul_pipe,
+ u8 *dl_pipe, int *ul_is_polled,
+ int *dl_is_polled)
+{
+ int ret = 0;
+
+ /* polling for received messages not supported */
+ *dl_is_polled = 0;
+
+ switch (service_id) {
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ /*
+ * Host->target HTT gets its own pipe, so it can be polled
+ * while other pipes are interrupt driven.
+ */
+ *ul_pipe = 4;
+ /*
+ * Use the same target->host pipe for HTC ctrl, HTC raw
+ * streams, and HTT.
+ */
+ *dl_pipe = 1;
+ break;
+
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ /*
+ * Note: HTC_RAW_STREAMS_SVC is currently unused, and
+ * HTC_CTRL_RSVD_SVC could share the same pipe as the
+ * WMI services. So, if another CE is needed, change
+ * this to *ul_pipe = 3, which frees up CE 0.
+ */
+ /* *ul_pipe = 3; */
+ *ul_pipe = 0;
+ *dl_pipe = 1;
+ break;
+
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ *ul_pipe = 3;
+ *dl_pipe = 2;
+ break;
+
+ /* pipe 5 unused */
+ /* pipe 6 reserved */
+ /* pipe 7 reserved */
+
+ default:
+ ret = -1;
+ break;
+ }
+ *ul_is_polled =
+ (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
+
+ return ret;
+}
+
+static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ int ul_is_polled, dl_is_polled;
+
+ (void)ath10k_pci_hif_map_service_to_pipe(ar,
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+ ul_pipe,
+ dl_pipe,
+ &ul_is_polled,
+ &dl_is_polled);
+}
+
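+/* Allocate, DMA-map and enqueue 'num' receive buffers on the given pipe. */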
+static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+ int num)
+{
+ struct ath10k *ar = pipe_info->hif_ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ce_state *ce_state = pipe_info->ce_hdl;
+ struct sk_buff *skb;
+ dma_addr_t ce_data;
+ int i, ret = 0;
+
+ if (pipe_info->buf_sz == 0)
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ skb = dev_alloc_skb(pipe_info->buf_sz);
+ if (!skb) {
+ ath10k_warn("could not allocate skbuff for pipe %d\n",
+ num);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ ce_data = dma_map_single(ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
+ ath10k_warn("could not dma map skbuff\n");
+ dev_kfree_skb_any(skb);
+ ret = -EIO;
+ goto err;
+ }
+
+ ATH10K_SKB_CB(skb)->paddr = ce_data;
+
+ pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
+ pipe_info->buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
+ ce_data);
+ if (ret) {
+ ath10k_warn("could not enqueue to pipe %d (%d)\n",
+ num, ret);
+ goto err;
+ }
+ }
+
+ return ret;
+
+err:
+ ath10k_pci_rx_pipe_cleanup(pipe_info);
+ return ret;
+}
+
+static int ath10k_pci_post_rx(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct hif_ce_pipe_info *pipe_info;
+ const struct ce_attr *attr;
+ int pipe_num, ret = 0;
+
+ for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+ attr = &host_ce_config_wlan[pipe_num];
+
+ if (attr->dest_nentries == 0)
+ continue;
+
+ ret = ath10k_pci_post_rx_pipe(pipe_info,
+ attr->dest_nentries - 1);
+ if (ret) {
+ ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
+ pipe_num);
+
+ for (; pipe_num >= 0; pipe_num--) {
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+ ath10k_pci_rx_pipe_cleanup(pipe_info);
+ }
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_hif_start(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = ath10k_pci_start_ce(ar);
+ if (ret) {
+ ath10k_warn("could not start CE (%d)\n", ret);
+ return ret;
+ }
+
+ /* Post buffers once to start things off. */
+ ret = ath10k_pci_post_rx(ar);
+ if (ret) {
+ ath10k_warn("could not post rx pipes (%d)\n", ret);
+ return ret;
+ }
+
+ ar_pci->started = 1;
+ return 0;
+}
+
+static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+{
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+ struct ce_state *ce_hdl;
+ u32 buf_sz;
+ struct sk_buff *netbuf;
+ u32 ce_data;
+
+ buf_sz = pipe_info->buf_sz;
+
+ /* Unused Copy Engine */
+ if (buf_sz == 0)
+ return;
+
+ ar = pipe_info->hif_ce_state;
+ ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci->started)
+ return;
+
+ ce_hdl = pipe_info->ce_hdl;
+
+ while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
+ &ce_data) == 0) {
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
+ netbuf->len + skb_tailroom(netbuf),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(netbuf);
+ }
+}
+
+static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+{
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+ struct ce_state *ce_hdl;
+ struct sk_buff *netbuf;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int id;
+ u32 buf_sz;
+
+ buf_sz = pipe_info->buf_sz;
+
+ /* Unused Copy Engine */
+ if (buf_sz == 0)
+ return;
+
+ ar = pipe_info->hif_ce_state;
+ ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci->started)
+ return;
+
+ ce_hdl = pipe_info->ce_hdl;
+
+ while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
+ &ce_data, &nbytes, &id) == 0) {
+ if (netbuf != CE_SENDLIST_ITEM_CTXT) {
+ /*
+ * Indicate the completion to the higher layer so it can
+ * free the buffer.
+ */
+ ATH10K_SKB_CB(netbuf)->is_aborted = true;
+ ar_pci->msg_callbacks_current.tx_completion(ar,
+ netbuf,
+ id);
+ }
+ }
+}
+
+/*
+ * Cleanup residual buffers for device shutdown:
+ * buffers that were enqueued for receive
+ * buffers that were to be sent
+ * Note: Buffers that had completed but which were
+ * not yet processed are on a completion queue. They
+ * are handled when the completion thread shuts down.
+ */
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ struct hif_ce_pipe_info *pipe_info;
+
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+ ath10k_pci_rx_pipe_cleanup(pipe_info);
+ ath10k_pci_tx_pipe_cleanup(pipe_info);
+ }
+}
+
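+/* Tear down every Copy Engine that was set up by ath10k_pci_ce_init(). */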
+static void ath10k_pci_ce_deinit(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct hif_ce_pipe_info *pipe_info;
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+ if (pipe_info->ce_hdl) {
+ ath10k_ce_deinit(pipe_info->ce_hdl);
+ pipe_info->ce_hdl = NULL;
+ pipe_info->buf_sz = 0;
+ }
+ }
+}
+
+static void ath10k_pci_hif_stop(struct ath10k *ar)
+{
+ ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+ ath10k_pci_stop_ce(ar);
+
+ /* At this point, asynchronous threads are stopped, the target should
+ * not DMA nor interrupt. We process the leftovers and then free
+ * everything else up. */
+
+ ath10k_pci_process_ce(ar);
+ ath10k_pci_cleanup_ce(ar);
+ ath10k_pci_buffer_cleanup(ar);
+ ath10k_pci_ce_deinit(ar);
+}
+
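+/*
+ * Perform a synchronous BMI request/response exchange over the dedicated
+ * BMI Copy Engine pipes, bouncing the buffers through DMA-mapped copies.
+ */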
+static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
+ struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+ dma_addr_t req_paddr = 0;
+ dma_addr_t resp_paddr = 0;
+ struct bmi_xfer xfer = {};
+ void *treq, *tresp = NULL;
+ int ret = 0;
+
+ if (resp && !resp_len)
+ return -EINVAL;
+
+ if (resp && resp_len && *resp_len == 0)
+ return -EINVAL;
+
+ treq = kmemdup(req, req_len, GFP_KERNEL);
+ if (!treq)
+ return -ENOMEM;
+
+ req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, req_paddr);
+ if (ret)
+ goto err_dma;
+
+ if (resp && resp_len) {
+ tresp = kzalloc(*resp_len, GFP_KERNEL);
+ if (!tresp) {
+ ret = -ENOMEM;
+ goto err_req;
+ }
+
+ resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(ar->dev, resp_paddr);
+ if (ret)
+ goto err_req;
+
+ xfer.wait_for_resp = true;
+ xfer.resp_len = 0;
+
+ ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
+ }
+
+ init_completion(&xfer.done);
+
+ ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
+ if (ret)
+ goto err_resp;
+
+ ret = wait_for_completion_timeout(&xfer.done,
+ BMI_COMMUNICATION_TIMEOUT_HZ);
+ if (ret <= 0) {
+ u32 unused_buffer;
+ unsigned int unused_nbytes;
+ unsigned int unused_id;
+
+ ret = -ETIMEDOUT;
+ ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
+ &unused_nbytes, &unused_id);
+ } else {
+ /* non-zero means we did not time out */
+ ret = 0;
+ }
+
+err_resp:
+ if (resp) {
+ u32 unused_buffer;
+
+ ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
+ dma_unmap_single(ar->dev, resp_paddr,
+ *resp_len, DMA_FROM_DEVICE);
+ }
+err_req:
+ dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
+
+ if (ret == 0 && resp_len) {
+ *resp_len = min(*resp_len, xfer.resp_len);
+ memcpy(resp, tresp, xfer.resp_len);
+ }
+err_dma:
+ kfree(treq);
+ kfree(tresp);
+
+ return ret;
+}
+
+static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
+ void *transfer_context,
+ u32 data,
+ unsigned int nbytes,
+ unsigned int transfer_id)
+{
+ struct bmi_xfer *xfer = transfer_context;
+
+ if (xfer->wait_for_resp)
+ return;
+
+ complete(&xfer->done);
+}
+
+static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
+ void *transfer_context,
+ u32 data,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct bmi_xfer *xfer = transfer_context;
+
+ if (!xfer->wait_for_resp) {
+ ath10k_warn("unexpected: BMI data received; ignoring\n");
+ return;
+ }
+
+ xfer->resp_len = nbytes;
+ complete(&xfer->done);
+}
+
+/*
+ * Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ ATH10K_HTC_SVC_ID_WMI_DATA_VO,
+ PIPEDIR_OUT, /* out = UL = host -> target */
+ 3,
+ },
+ {
+ ATH10K_HTC_SVC_ID_WMI_DATA_VO,
+ PIPEDIR_IN, /* in = DL = target -> host */
+ 2,
+ },
+ {
+ ATH10K_HTC_SVC_ID_WMI_DATA_BK,
+ PIPEDIR_OUT, /* out = UL = host -> target */
+ 3,
+ },
+ {
+ ATH10K_HTC_SVC_ID_WMI_DATA_BK,
+ PIPEDIR_IN, /* in = DL = target -> host */
+ 2,
+ },
+ {
+ ATH10K_HTC_SVC_ID_WMI_DATA_BE,
+ PIPEDIR_OUT, /* out = UL = host -> target */
+ 3,
+ },
+ {
+ ATH10K_HTC_SVC_ID_WMI_DATA_BE,
+ PIPEDIR_IN, /* in = DL = target -> host */
+ 2,
+ },
+ {
+ ATH10K_HTC_SVC_ID_WMI_DATA_VI,
+ PIPEDIR_OUT, /* out = UL = host -> target */
+ 3,
+ },
+ {
+ ATH10K_HTC_SVC_ID_WMI_DATA_VI,
+ PIPEDIR_IN, /* in = DL = target -> host */
+ 2,
+ },
+ {
+ ATH10K_HTC_SVC_ID_WMI_CONTROL,
+ PIPEDIR_OUT, /* out = UL = host -> target */
+ 3,
+ },
+ {
+ ATH10K_HTC_SVC_ID_WMI_CONTROL,
+ PIPEDIR_IN, /* in = DL = target -> host */
+ 2,
+ },
+ {
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+ PIPEDIR_OUT, /* out = UL = host -> target */
+ 0, /* could be moved to 3 (share with WMI) */
+ },
+ {
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+ PIPEDIR_IN, /* in = DL = target -> host */
+ 1,
+ },
+ {
+ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
+ PIPEDIR_OUT, /* out = UL = host -> target */
+ 0,
+ },
+ {
+ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
+ PIPEDIR_IN, /* in = DL = target -> host */
+ 1,
+ },
+ {
+ ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
+ PIPEDIR_OUT, /* out = UL = host -> target */
+ 4,
+ },
+ {
+ ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
+ PIPEDIR_IN, /* in = DL = target -> host */
+ 1,
+ },
+
+ /* (Additions here) */
+
+ { /* Must be last */
+ 0,
+ 0,
+ 0,
+ },
+};
+
+/*
+ * Send an interrupt to the device to wake up the Target CPU
+ * so it has an opportunity to notice any changed state.
+ */
+static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
+{
+ int ret;
+ u32 core_ctrl;
+
+ ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
+ CORE_CTRL_ADDRESS,
+ &core_ctrl);
+ if (ret) {
+ ath10k_warn("Unable to read core ctrl\n");
+ return ret;
+ }
+
+ /* A_INUM_FIRMWARE interrupt to Target CPU */
+ core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
+
+ ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
+ CORE_CTRL_ADDRESS,
+ core_ctrl);
+ if (ret)
+ ath10k_warn("Unable to set interrupt mask\n");
+
+ return ret;
+}
+
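+/*
+ * Push the CE pipe configuration, service-to-pipe map, early-allocation and
+ * option flags to the target via the diagnostic window.
+ */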
+static int ath10k_pci_init_config(struct ath10k *ar)
+{
+ u32 interconnect_targ_addr;
+ u32 pcie_state_targ_addr = 0;
+ u32 pipe_cfg_targ_addr = 0;
+ u32 svc_to_pipe_map = 0;
+ u32 pcie_config_flags = 0;
+ u32 ealloc_value;
+ u32 ealloc_targ_addr;
+ u32 flag2_value;
+ u32 flag2_targ_addr;
+ int ret = 0;
+
+ /* Download to Target the CE Config and the service-to-CE map */
+ interconnect_targ_addr =
+ host_interest_item_address(HI_ITEM(hi_interconnect_state));
+
+ /* Supply Target-side CE configuration */
+ ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
+ &pcie_state_targ_addr);
+ if (ret != 0) {
+ ath10k_err("Failed to get pcie state addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pcie_state_targ_addr == 0) {
+ ret = -EIO;
+ ath10k_err("Invalid pcie state addr\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ pipe_cfg_addr),
+ &pipe_cfg_targ_addr);
+ if (ret != 0) {
+ ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pipe_cfg_targ_addr == 0) {
+ ret = -EIO;
+ ath10k_err("Invalid pipe cfg addr\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
+ target_ce_config_wlan,
+ sizeof(target_ce_config_wlan));
+
+ if (ret != 0) {
+ ath10k_err("Failed to write pipe cfg: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ svc_to_pipe_map),
+ &svc_to_pipe_map);
+ if (ret != 0) {
+ ath10k_err("Failed to get svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+ if (svc_to_pipe_map == 0) {
+ ret = -EIO;
+ ath10k_err("Invalid svc_to_pipe map\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
+ target_service_to_ce_map_wlan,
+ sizeof(target_service_to_ce_map_wlan));
+ if (ret != 0) {
+ ath10k_err("Failed to write svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ config_flags),
+ &pcie_config_flags);
+ if (ret != 0) {
+ ath10k_err("Failed to get pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
+
+ ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
+ offsetof(struct pcie_state, config_flags),
+ &pcie_config_flags,
+ sizeof(pcie_config_flags));
+ if (ret != 0) {
+ ath10k_err("Failed to write pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ /* configure early allocation */
+ ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
+
+ ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
+ if (ret != 0) {
+ ath10k_err("Faile to get early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* first bank is switched to IRAM */
+ ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
+ HI_EARLY_ALLOC_MAGIC_MASK);
+ ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+ HI_EARLY_ALLOC_IRAM_BANKS_MASK);
+
+ ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
+ if (ret != 0) {
+ ath10k_err("Failed to set early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* Tell Target to proceed with initialization */
+ flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
+
+ ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
+ if (ret != 0) {
+ ath10k_err("Failed to get option val: %d\n", ret);
+ return ret;
+ }
+
+ flag2_value |= HI_OPTION_EARLY_CFG_DONE;
+
+ ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
+ if (ret != 0) {
+ ath10k_err("Failed to set option val: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_ce_init(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct hif_ce_pipe_info *pipe_info;
+ const struct ce_attr *attr;
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+ pipe_info->pipe_num = pipe_num;
+ pipe_info->hif_ce_state = ar;
+ attr = &host_ce_config_wlan[pipe_num];
+
+ pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
+ if (pipe_info->ce_hdl == NULL) {
+ ath10k_err("Unable to initialize CE for pipe: %d\n",
+ pipe_num);
+
+ /* It is safe to call it here. It checks if ce_hdl is
+ * valid for each pipe */
+ ath10k_pci_ce_deinit(ar);
+ return -1;
+ }
+
+ if (pipe_num == ar_pci->ce_count - 1) {
+ /*
+ * Reserve the ultimate CE for
+ * diagnostic Window support
+ */
+ ar_pci->ce_diag =
+ ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
+ continue;
+ }
+
+ pipe_info->buf_sz = (size_t) (attr->src_sz_max);
+ }
+
+ /*
+ * Initially, establish CE completion handlers for use with BMI.
+ * These are overwritten with generic handlers after we exit BMI phase.
+ */
+ pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+ ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+ ath10k_pci_bmi_send_done, 0);
+
+ pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+ ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+ ath10k_pci_bmi_recv_data);
+
+ return 0;
+}
+
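+/*
+ * Handle a firmware event interrupt: ack the pending event and, if the
+ * device has started, dump the firmware crash area.
+ */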
+static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 fw_indicator_address, fw_indicator;
+
+ ath10k_pci_wake(ar);
+
+ fw_indicator_address = ar_pci->fw_indicator_address;
+ fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
+
+ if (fw_indicator & FW_IND_EVENT_PENDING) {
+ /* ACK: clear Target-side pending event */
+ ath10k_pci_write32(ar, fw_indicator_address,
+ fw_indicator & ~FW_IND_EVENT_PENDING);
+
+ if (ar_pci->started) {
+ ath10k_pci_hif_dump_area(ar);
+ } else {
+ /*
+ * Probable Target failure before we're prepared
+ * to handle it. Generally unexpected.
+ */
+ ath10k_warn("early firmware event indicated\n");
+ }
+ }
+
+ ath10k_pci_sleep(ar);
+}
+
+static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+ .send_head = ath10k_pci_hif_send_head,
+ .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ .start = ath10k_pci_hif_start,
+ .stop = ath10k_pci_hif_stop,
+ .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_pci_hif_get_default_pipe,
+ .send_complete_check = ath10k_pci_hif_send_complete_check,
+ .init = ath10k_pci_hif_post_init,
+ .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+};
+
+static void ath10k_pci_ce_tasklet(unsigned long ptr)
+{
+ struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+ struct ath10k_pci *ar_pci = pipe->ar_pci;
+
+ ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
+}
+
+static void ath10k_msi_err_tasklet(unsigned long data)
+{
+ struct ath10k *ar = (struct ath10k *)data;
+
+ ath10k_pci_fw_interrupt_handler(ar);
+}
+
+/*
+ * Handler for a per-engine interrupt on a PARTICULAR CE.
+ * This is used in cases where each CE has a private MSI interrupt.
+ */
+static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
+
+ if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
+ ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * NOTE: We are able to derive ce_id from irq because we
+ * use a one-to-one mapping for CE's 0..5.
+ * CE's 6 & 7 do not use interrupts at all.
+ *
+ * This mapping must be kept in sync with the mapping
+ * used by firmware.
+ */
+ tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ tasklet_schedule(&ar_pci->msi_fw_err);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Top-level interrupt handler for all PCI interrupts from a Target.
+ * When a block of MSI interrupts is allocated, this top-level handler
+ * is not used; instead, we directly call the correct sub-handler.
+ */
+static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (ar_pci->num_msi_intrs == 0) {
+ /*
+ * IMPORTANT: the INTR_CLR register has to be set after
+ * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
+ * cleared properly.
+ */
+ iowrite32(0, ar_pci->mem +
+ (SOC_CORE_BASE_ADDRESS |
+ PCIE_INTR_ENABLE_ADDRESS));
+ iowrite32(PCIE_INTR_FIRMWARE_MASK |
+ PCIE_INTR_CE_MASK_ALL,
+ ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+ PCIE_INTR_CLR_ADDRESS));
+ /*
+ * IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer.
+ */
+ (void) ioread32(ar_pci->mem +
+ (SOC_CORE_BASE_ADDRESS |
+ PCIE_INTR_ENABLE_ADDRESS));
+ }
+
+ tasklet_schedule(&ar_pci->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath10k_pci_tasklet(unsigned long data)
+{
+ struct ath10k *ar = (struct ath10k *)data;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
+ ath10k_ce_per_engine_service_any(ar);
+
+ if (ar_pci->num_msi_intrs == 0) {
+ /* Enable Legacy PCI line interrupts */
+ iowrite32(PCIE_INTR_FIRMWARE_MASK |
+ PCIE_INTR_CE_MASK_ALL,
+ ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+ PCIE_INTR_ENABLE_ADDRESS));
+ /*
+ * IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer
+ */
+ (void) ioread32(ar_pci->mem +
+ (SOC_CORE_BASE_ADDRESS |
+ PCIE_INTR_ENABLE_ADDRESS));
+ }
+}
+
+static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+ int i;
+
+ ret = pci_enable_msi_block(ar_pci->pdev, num);
+ if (ret)
+ return ret;
+
+ ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
+ ath10k_pci_msi_fw_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret)
+ return ret;
+
+ for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
+ ret = request_irq(ar_pci->pdev->irq + i,
+ ath10k_pci_per_engine_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+ ath10k_warn("request_irq(%d) failed %d\n",
+ ar_pci->pdev->irq + i, ret);
+
+ for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
+ free_irq(ar_pci->pdev->irq + i, ar);
+
+ free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
+ pci_disable_msi(ar_pci->pdev);
+ return ret;
+ }
+ }
+
+ ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
+ return 0;
+}
+
+static int ath10k_pci_start_intr_msi(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = pci_enable_msi(ar_pci->pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = request_irq(ar_pci->pdev->irq,
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret < 0) {
+ pci_disable_msi(ar_pci->pdev);
+ return ret;
+ }
+
+ ath10k_info("MSI interrupt handling\n");
+ return 0;
+}
+
+static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_pci->pdev->irq,
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Make sure to wake the Target before enabling Legacy
+ * Interrupt.
+ */
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+
+ ath10k_pci_wait(ar);
+
+ /*
+ * A potential race occurs here: the CORE_BASE write depends on the
+ * target correctly decoding the AXI address, but the host cannot know
+ * when the target has written its BAR to CORE_CTRL. The write might be
+ * lost if the target has NOT yet written the BAR. For now, work around
+ * the race by repeating the write in the synchronization check below.
+ */
+ iowrite32(PCIE_INTR_FIRMWARE_MASK |
+ PCIE_INTR_CE_MASK_ALL,
+ ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+ PCIE_INTR_ENABLE_ADDRESS));
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+
+ ath10k_info("legacy interrupt handling\n");
+ return 0;
+}
+
+static int ath10k_pci_start_intr(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int num = MSI_NUM_REQUEST;
+ int ret;
+ int i;
+
+ tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
+ tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
+ (unsigned long) ar);
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ar_pci->pipe_info[i].ar_pci = ar_pci;
+ tasklet_init(&ar_pci->pipe_info[i].intr,
+ ath10k_pci_ce_tasklet,
+ (unsigned long)&ar_pci->pipe_info[i]);
+ }
+
+ if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
+ num = 1;
+
+ if (num > 1) {
+ ret = ath10k_pci_start_intr_msix(ar, num);
+ if (ret == 0)
+ goto exit;
+
+ ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
+ num = 1;
+ }
+
+ if (num == 1) {
+ ret = ath10k_pci_start_intr_msi(ar);
+ if (ret == 0)
+ goto exit;
+
+ ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
+ ret);
+ num = 0;
+ }
+
+ ret = ath10k_pci_start_intr_legacy(ar);
+
+exit:
+ ar_pci->num_msi_intrs = num;
+ ar_pci->ce_count = CE_COUNT;
+ return ret;
+}
+
+static void ath10k_pci_stop_intr(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ /* There's at least one interrupt regardless of whether it's
+ * legacy INTR, MSI or MSI-X */
+ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+ free_irq(ar_pci->pdev->irq + i, ar);
+
+ if (ar_pci->num_msi_intrs > 0)
+ pci_disable_msi(ar_pci->pdev);
+}
+
+static int ath10k_pci_reset_target(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int wait_limit = 300; /* 3 sec */
+
+ /* Wait for Target to finish initialization before we proceed. */
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+
+ ath10k_pci_wait(ar);
+
+ while (wait_limit-- &&
+ !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
+ FW_IND_INITIALIZED)) {
+ if (ar_pci->num_msi_intrs == 0)
+ /* Fix potential race by repeating CORE_BASE writes */
+ iowrite32(PCIE_INTR_FIRMWARE_MASK |
+ PCIE_INTR_CE_MASK_ALL,
+ ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+ PCIE_INTR_ENABLE_ADDRESS));
+ mdelay(10);
+ }
+
+ if (wait_limit < 0) {
+ ath10k_err("Target stalled\n");
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ return -EIO;
+ }
+
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+
+ return 0;
+}
+
+static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
+{
+ struct ath10k *ar = ar_pci->ar;
+ void __iomem *mem = ar_pci->mem;
+ int i;
+ u32 val;
+
+ if (!SOC_GLOBAL_RESET_ADDRESS)
+ return;
+
+ if (!mem)
+ return;
+
+ ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+ PCIE_SOC_WAKE_V_MASK);
+ for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+ if (ath10k_pci_target_is_awake(ar))
+ break;
+ msleep(1);
+ }
+
+ /* Put Target, including PCIe, into RESET. */
+ val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+ val |= 1;
+ ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+
+ for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+ if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ RTC_STATE_COLD_RESET_MASK)
+ break;
+ msleep(1);
+ }
+
+ /* Pull Target, including PCIe, out of RESET. */
+ val &= ~1;
+ ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+
+ for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+ if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ RTC_STATE_COLD_RESET_MASK))
+ break;
+ msleep(1);
+ }
+
+ ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+}
+
+static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
+{
+ int i;
+
+ for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
+ if (!test_bit(i, ar_pci->features))
+ continue;
+
+ switch (i) {
+ case ATH10K_PCI_FEATURE_MSI_X:
+ ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
+ break;
+ case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
+ ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+ break;
+ }
+ }
+}
+
+static int ath10k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ void __iomem *mem;
+ int ret = 0;
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+ u32 lcr_val;
+
+ ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+ ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
+ if (ar_pci == NULL)
+ return -ENOMEM;
+
+ ar_pci->pdev = pdev;
+ ar_pci->dev = &pdev->dev;
+
+ switch (pci_dev->device) {
+ case QCA988X_1_0_DEVICE_ID:
+ set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
+ break;
+ case QCA988X_2_0_DEVICE_ID:
+ set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
+ break;
+ default:
+ ret = -ENODEV;
+ ath10k_err("Unkown device ID: %d\n", pci_dev->device);
+ goto err_ar_pci;
+ }
+
+ ath10k_pci_dump_features(ar_pci);
+
+ ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
+ &ath10k_pci_hif_ops);
+ if (!ar) {
+ ath10k_err("ath10k_core_create failed!\n");
+ ret = -EINVAL;
+ goto err_ar_pci;
+ }
+
+ /* Enable QCA988X_1.0 HW workarounds */
+ if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
+ spin_lock_init(&ar_pci->hw_v1_workaround_lock);
+
+ ar_pci->ar = ar;
+ ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
+ atomic_set(&ar_pci->keep_awake_count, 0);
+
+ pci_set_drvdata(pdev, ar);
+
+ /*
+ * Without any knowledge of the Host, the Target may have been reset or
+ * power cycled and its Config Space may no longer reflect the PCI
+ * address space that was assigned earlier by the PCI infrastructure.
+ * Refresh it now.
+ */
+ ret = pci_assign_resource(pdev, BAR_NUM);
+ if (ret) {
+ ath10k_err("cannot assign PCI space: %d\n", ret);
+ goto err_ar;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath10k_err("cannot enable PCI device: %d\n", ret);
+ goto err_ar;
+ }
+
+ /* Request MMIO resources */
+ ret = pci_request_region(pdev, BAR_NUM, "ath");
+ if (ret) {
+ ath10k_err("PCI MMIO reservation error: %d\n", ret);
+ goto err_device;
+ }
+
+ /*
+ * Target structures are limited to 32-bit DMA pointers.
+ * DMA pointers can be wider than 32 bits by default on some systems.
+ */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err("32-bit DMA not available: %d\n", ret);
+ goto err_region;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err("cannot enable 32-bit consistent DMA\n");
+ goto err_region;
+ }
+
+ /* Set bus master bit in PCI_COMMAND to enable DMA */
+ pci_set_master(pdev);
+
+ /*
+ * Temporary FIX: disable ASPM
+ * Will be removed after the OTP is programmed
+ */
+ pci_read_config_dword(pdev, 0x80, &lcr_val);
+ pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
+
+ /* Arrange for access to Target SoC registers. */
+ mem = pci_iomap(pdev, BAR_NUM, 0);
+ if (!mem) {
+ ath10k_err("PCI iomap error\n");
+ ret = -EIO;
+ goto err_master;
+ }
+
+ ar_pci->mem = mem;
+
+ spin_lock_init(&ar_pci->ce_lock);
+
+ ar_pci->cacheline_sz = dma_get_cache_alignment();
+
+ ret = ath10k_pci_start_intr(ar);
+ if (ret) {
+ ath10k_err("could not start interrupt handling (%d)\n", ret);
+ goto err_iomap;
+ }
+
+ /*
+ * Bring the target up cleanly.
+ *
+ * The target may be in an undefined state with an AUX-powered Target
+ * and a Host in WoW mode. If the Host crashes, loses power, or is
+ * restarted (without unloading the driver) then the Target is left
+ * (aux) powered and running. On a subsequent driver load, the Target
+ * is in an unexpected state. We try to catch that here in order to
+ * reset the Target and retry the probe.
+ */
+ ath10k_pci_device_reset(ar_pci);
+
+ ret = ath10k_pci_reset_target(ar);
+ if (ret)
+ goto err_intr;
+
+ if (ath10k_target_ps) {
+ ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
+ } else {
+ /* Force AWAKE forever */
+ ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
+ ath10k_do_pci_wake(ar);
+ }
+
+ ret = ath10k_pci_ce_init(ar);
+ if (ret)
+ goto err_intr;
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret)
+ goto err_ce;
+
+ ret = ath10k_pci_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err("could not wake up target CPU (%d)\n", ret);
+ goto err_ce;
+ }
+
+ ret = ath10k_core_register(ar);
+ if (ret) {
+ ath10k_err("could not register driver core (%d)\n", ret);
+ goto err_ce;
+ }
+
+ return 0;
+
+err_ce:
+ ath10k_pci_ce_deinit(ar);
+err_intr:
+ ath10k_pci_stop_intr(ar);
+err_iomap:
+ pci_iounmap(pdev, mem);
+err_master:
+ pci_clear_master(pdev);
+err_region:
+ pci_release_region(pdev, BAR_NUM);
+err_device:
+ pci_disable_device(pdev);
+err_ar:
+ pci_set_drvdata(pdev, NULL);
+ ath10k_core_destroy(ar);
+err_ar_pci:
+ /* call HIF PCI free here */
+ kfree(ar_pci);
+
+ return ret;
+}
+
+static void ath10k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath10k *ar = pci_get_drvdata(pdev);
+ struct ath10k_pci *ar_pci;
+
+ ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+ if (!ar)
+ return;
+
+ ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci)
+ return;
+
+ tasklet_kill(&ar_pci->msi_fw_err);
+
+ ath10k_core_unregister(ar);
+ ath10k_pci_stop_intr(ar);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, ar_pci->mem);
+ pci_release_region(pdev, BAR_NUM);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+
+ ath10k_core_destroy(ar);
+ kfree(ar_pci);
+}
+
+#if defined(CONFIG_PM_SLEEP)
+
+#define ATH10K_PCI_PM_CONTROL 0x44
+
+static int ath10k_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ath10k *ar = pci_get_drvdata(pdev);
+ struct ath10k_pci *ar_pci;
+ u32 val;
+ int ret, retval;
+
+ ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+ if (!ar)
+ return -ENODEV;
+
+ ar_pci = ath10k_pci_priv(ar);
+ if (!ar_pci)
+ return -ENODEV;
+
+ if (ath10k_core_target_suspend(ar))
+ return -EBUSY;
+
+ ret = wait_event_interruptible_timeout(ar->event_queue,
+ ar->is_target_paused == true,
+ 1 * HZ);
+ if (ret < 0) {
+ ath10k_warn("suspend interrupted (%d)\n", ret);
+ retval = ret;
+ goto resume;
+ } else if (ret == 0) {
+ ath10k_warn("suspend timed out - target pause event never came\n");
+ retval = -EIO;
+ goto resume;
+ }
+
+ /*
+ * Reset is_target_paused so that the next suspend waits for the
+ * pause event again. Otherwise it stays true, the host skips the
+ * wait above, and the target asserts because the host has
+ * already suspended.
+ */
+ ar->is_target_paused = false;
+
+ pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+ if ((val & 0x000000ff) != 0x3) {
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+ (val & 0xffffff00) | 0x03);
+ }
+
+ return 0;
+resume:
+ ret = ath10k_core_target_resume(ar);
+ if (ret)
+ ath10k_warn("could not resume (%d)\n", ret);
+
+ return retval;
+}
+
+static int ath10k_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ath10k *ar = pci_get_drvdata(pdev);
+ struct ath10k_pci *ar_pci;
+ int ret;
+ u32 val;
+
+ ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+ if (!ar)
+ return -ENODEV;
+ ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci)
+ return -ENODEV;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath10k_warn("cannot enable PCI device: %d\n", ret);
+ return ret;
+ }
+
+ pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+ if ((val & 0x000000ff) != 0) {
+ pci_restore_state(pdev);
+ pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+ val & 0xffffff00);
+ /*
+ * Suspend/Resume resets the PCI configuration space,
+ * so we have to re-disable the RETRY_TIMEOUT register (0x41)
+ * to keep PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+ }
+
+ ret = ath10k_core_target_resume(ar);
+ if (ret)
+ ath10k_warn("target resume failed: %d\n", ret);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
+ ath10k_pci_suspend,
+ ath10k_pci_resume);
+
+#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
+
+#else
+
+#define ATH10K_PCI_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
+
+static struct pci_driver ath10k_pci_driver = {
+ .name = "ath10k_pci",
+ .id_table = ath10k_pci_id_table,
+ .probe = ath10k_pci_probe,
+ .remove = ath10k_pci_remove,
+ .driver.pm = ATH10K_PCI_PM_OPS,
+};
+
+static int __init ath10k_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath10k_pci_driver);
+ if (ret)
+ ath10k_err("pci_register_driver failed [%d]\n", ret);
+
+ return ret;
+}
+module_init(ath10k_pci_init);
+
+static void __exit ath10k_pci_exit(void)
+{
+ pci_unregister_driver(&ath10k_pci_driver);
+}
+
+module_exit(ath10k_pci_exit);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
+MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
new file mode 100644
index 000000000000..d2a055a07dc6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#include <linux/interrupt.h>
+
+#include "hw.h"
+#include "ce.h"
+
+/* FW dump area */
+#define REG_DUMP_COUNT_QCA988X 60
+
+/*
+ * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
+ */
+#define DIAG_TRANSFER_LIMIT 2048
+
+struct bmi_xfer {
+ struct completion done;
+ bool wait_for_resp;
+ u32 resp_len;
+};
+
+struct ath10k_pci_compl {
+ struct list_head list;
+ int send_or_recv;
+ struct ce_state *ce_state;
+ struct hif_ce_pipe_info *pipe_info;
+ void *transfer_context;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
+};
+
+/* compl_state.send_or_recv */
+#define HIF_CE_COMPLETE_FREE 0
+#define HIF_CE_COMPLETE_SEND 1
+#define HIF_CE_COMPLETE_RECV 2
+
+/*
+ * PCI-specific Target state
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ *
+ * Much of this may be of interest to the Host so
+ * HOST_INTEREST->hi_interconnect_state points here
+ * (and all members are 32-bit quantities in order to
+ * facilitate Host access). In particular, Host software is
+ * required to initialize pipe_cfg_addr and svc_to_pipe_map.
+ */
+struct pcie_state {
+ /* Pipe configuration Target address */
+ /* NB: ce_pipe_config[CE_COUNT] */
+ u32 pipe_cfg_addr;
+
+ /* Service to pipe map Target address */
+ /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
+ u32 svc_to_pipe_map;
+
+ /* number of MSI interrupts requested */
+ u32 msi_requested;
+
+ /* number of MSI interrupts granted */
+ u32 msi_granted;
+
+ /* Message Signalled Interrupt address */
+ u32 msi_addr;
+
+ /* Base data */
+ u32 msi_data;
+
+ /*
+ * Data for firmware interrupt;
+ * MSI data for other interrupts are
+ * in various SoC registers
+ */
+ u32 msi_fw_intr_data;
+
+ /* PCIE_PWR_METHOD_* */
+ u32 power_mgmt_method;
+
+ /* PCIE_CONFIG_FLAG_* */
+ u32 config_flags;
+};
+
+/* PCIE_CONFIG_FLAG definitions */
+#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ u32 pipenum;
+ u32 pipedir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
+ u32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+ u32 service_id;
+ u32 pipedir;
+ u32 pipenum;
+};
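+
+/*
+ * Example (illustrative sketch only): a single mapping entry routing the
+ * WMI control service out to a CE pipe. The service ID name is assumed to
+ * come from the HTC layer and the pipe number here is only an example
+ * value, not a statement of the actual map.
+ *
+ *	static const struct service_to_pipe example_map[] = {
+ *		{
+ *			.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL,
+ *			.pipedir = PIPEDIR_OUT,
+ *			.pipenum = 3,
+ *		},
+ *	};
+ */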
+
+enum ath10k_pci_features {
+ ATH10K_PCI_FEATURE_MSI_X = 0,
+ ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1,
+
+ /* keep last */
+ ATH10K_PCI_FEATURE_COUNT
+};
+
+/* Per-pipe state. */
+struct hif_ce_pipe_info {
+ /* Handle of underlying Copy Engine */
+ struct ce_state *ce_hdl;
+
+ /* Our pipe number; facilitates use of pipe_info ptrs. */
+ u8 pipe_num;
+
+ /* Convenience back pointer to hif_ce_state. */
+ struct ath10k *hif_ce_state;
+
+ size_t buf_sz;
+
+ /* protects compl_free and num_send_allowed */
+ spinlock_t pipe_lock;
+
+ /* List of free CE completion slots */
+ struct list_head compl_free;
+
+ /* Limit the number of outstanding send requests. */
+ int num_sends_allowed;
+
+ struct ath10k_pci *ar_pci;
+ struct tasklet_struct intr;
+};
+
+struct ath10k_pci {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct ath10k *ar;
+ void __iomem *mem;
+ int cacheline_sz;
+
+ DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
+
+ /*
+ * Number of MSI interrupts granted, 0 --> using legacy PCI line
+ * interrupts.
+ */
+ int num_msi_intrs;
+
+ struct tasklet_struct intr_tq;
+ struct tasklet_struct msi_fw_err;
+
+ /* Number of Copy Engines supported */
+ unsigned int ce_count;
+
+ int started;
+
+ atomic_t keep_awake_count;
+ bool verified_awake;
+
+ /* List of CE completions to be processed */
+ struct list_head compl_process;
+
+ /* protects compl_processing and compl_process */
+ spinlock_t compl_lock;
+
+ bool compl_processing;
+
+ struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+
+ struct ath10k_hif_cb msg_callbacks_current;
+
+ /* Target address used to signal a pending firmware event */
+ u32 fw_indicator_address;
+
+ /* Copy Engine used for Diagnostic Accesses */
+ struct ce_state *ce_diag;
+
+ /* FIXME: document what this really protects */
+ spinlock_t ce_lock;
+
+ /* Map CE id to ce_state */
+ struct ce_state *ce_id_to_state[CE_COUNT_MAX];
+
+ /* makes sure that dummy reads are atomic */
+ spinlock_t hw_v1_workaround_lock;
+};
+
+static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
+{
+ return ar->hif.priv;
+}
+
+static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
+{
+ return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+{
+ iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
+#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
+
+#define BAR_NUM 0
+
+#define CDC_WAR_MAGIC_STR 0xceef0000
+#define CDC_WAR_DATA_CE 4
+
+/*
+ * TODO: Should be a function call specific to each Target-type.
+ * This convoluted macro converts from Target CPU Virtual Address Space to CE
+ * Address Space. As part of this process, we conservatively fetch the current
+ * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
+ * for this device; but that's not guaranteed.
+ */
+#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
+ (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
+ CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
+ 0x100000 | ((addr) & 0xfffff))
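+
+/*
+ * Example (sketch): converting a Target CPU address before a diagnostic CE
+ * transfer, where ar_pci->mem is the mapped PCI BAR:
+ *
+ *	u32 ce_addr = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+ */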
+
+/* Wait up to this many ms for a Diagnostic Access CE operation to complete */
+#define DIAG_ACCESS_CE_TIMEOUT_MS 10
+
+/*
+ * This API allows the Host to access Target registers directly
+ * and relatively efficiently over PCIe.
+ * This allows the Host to avoid extra overhead associated with
+ * sending a message to firmware and waiting for a response message
+ * from firmware, as is done on other interconnects.
+ *
+ * Yet there is some complexity with direct accesses because the
+ * Target's power state is not known a priori. The Host must issue
+ * special PCIe reads/writes in order to explicitly wake the Target
+ * and to verify that it is awake and will remain awake.
+ *
+ * Usage:
+ *
+ * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
+ * These calls must be bracketed by ath10k_pci_wake and
+ * ath10k_pci_sleep. A single BEGIN/END pair is adequate for
+ * multiple READ/WRITE operations.
+ *
+ * Use ath10k_pci_wake to put the Target in a state in
+ * which it is legal for the Host to directly access it. This
+ * may involve waking the Target from a low power state, which
+ * may take up to 2 ms!
+ *
+ * Use ath10k_pci_sleep to tell the Target that as far as
+ * this code path is concerned, it no longer needs to remain
+ * directly accessible. BEGIN/END is under a reference counter;
+ * multiple code paths may issue BEGIN/END on a single targid.
+ */
+static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
+ u32 value)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ void __iomem *addr = ar_pci->mem;
+
+ if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
+
+ ioread32(addr+offset+4); /* 3rd read prior to write */
+ ioread32(addr+offset+4); /* 2nd read prior to write */
+ ioread32(addr+offset+4); /* 1st read prior to write */
+ iowrite32(value, addr+offset);
+
+ spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
+ irq_flags);
+ } else {
+ iowrite32(value, addr+offset);
+ }
+}
+
+static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ioread32(ar_pci->mem + offset);
+}
+
+extern unsigned int ath10k_target_ps;
+
+void ath10k_do_pci_wake(struct ath10k *ar);
+void ath10k_do_pci_sleep(struct ath10k *ar);
+
+static inline void ath10k_pci_wake(struct ath10k *ar)
+{
+ if (ath10k_target_ps)
+ ath10k_do_pci_wake(ar);
+}
+
+static inline void ath10k_pci_sleep(struct ath10k *ar)
+{
+ if (ath10k_target_ps)
+ ath10k_do_pci_sleep(ar);
+}
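+
+/*
+ * Example (sketch): a typical direct register access sequence. The
+ * wake/sleep calls bracket the accesses so the Target remains awake for
+ * the whole read-modify-write; the register offset is only an example.
+ *
+ *	u32 val;
+ *
+ *	ath10k_pci_wake(ar);
+ *	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS);
+ *	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS, val);
+ *	ath10k_pci_sleep(ar);
+ */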
+
+#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
new file mode 100644
index 000000000000..bfec6c8f2ecb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -0,0 +1,990 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _RX_DESC_H_
+#define _RX_DESC_H_
+
+enum rx_attention_flags {
+ RX_ATTENTION_FLAGS_FIRST_MPDU = 1 << 0,
+ RX_ATTENTION_FLAGS_LAST_MPDU = 1 << 1,
+ RX_ATTENTION_FLAGS_MCAST_BCAST = 1 << 2,
+ RX_ATTENTION_FLAGS_PEER_IDX_INVALID = 1 << 3,
+ RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = 1 << 4,
+ RX_ATTENTION_FLAGS_POWER_MGMT = 1 << 5,
+ RX_ATTENTION_FLAGS_NON_QOS = 1 << 6,
+ RX_ATTENTION_FLAGS_NULL_DATA = 1 << 7,
+ RX_ATTENTION_FLAGS_MGMT_TYPE = 1 << 8,
+ RX_ATTENTION_FLAGS_CTRL_TYPE = 1 << 9,
+ RX_ATTENTION_FLAGS_MORE_DATA = 1 << 10,
+ RX_ATTENTION_FLAGS_EOSP = 1 << 11,
+ RX_ATTENTION_FLAGS_U_APSD_TRIGGER = 1 << 12,
+ RX_ATTENTION_FLAGS_FRAGMENT = 1 << 13,
+ RX_ATTENTION_FLAGS_ORDER = 1 << 14,
+ RX_ATTENTION_FLAGS_CLASSIFICATION = 1 << 15,
+ RX_ATTENTION_FLAGS_OVERFLOW_ERR = 1 << 16,
+ RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = 1 << 17,
+ RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18,
+ RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = 1 << 19,
+ RX_ATTENTION_FLAGS_SA_IDX_INVALID = 1 << 20,
+ RX_ATTENTION_FLAGS_DA_IDX_INVALID = 1 << 21,
+ RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = 1 << 22,
+ RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = 1 << 23,
+ RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = 1 << 24,
+ RX_ATTENTION_FLAGS_DIRECTED = 1 << 25,
+ RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = 1 << 26,
+ RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = 1 << 27,
+ RX_ATTENTION_FLAGS_TKIP_MIC_ERR = 1 << 28,
+ RX_ATTENTION_FLAGS_DECRYPT_ERR = 1 << 29,
+ RX_ATTENTION_FLAGS_FCS_ERR = 1 << 30,
+ RX_ATTENTION_FLAGS_MSDU_DONE = 1 << 31,
+};
+
+struct rx_attention {
+ __le32 flags; /* %RX_ATTENTION_FLAGS_ */
+} __packed;
+
+/*
+ * first_mpdu
+ * Indicates the first MSDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is not an
+ * A-MPDU frame but a stand-alone MPDU. Interior MPDUs in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * last_mpdu
+ * Indicates the last MSDU of the last MPDU of the PPDU. The
+ * PPDU end status will only be valid when this bit is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * peer_idx_invalid
+ * Indicates no matching entries within the max search
+ * count. Only set when first_msdu is set.
+ *
+ * peer_idx_timeout
+ * Indicates an unsuccessful search for the peer index due to
+ * timeout. Only set when first_msdu is set.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if packet is not a QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * u_apsd_trigger
+ * Set if packet is U-APSD trigger. Key table will have bits
+ * per TID to indicate U-APSD trigger.
+ *
+ * fragment
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame
+ * control or the fragment number is not zero. Only set when
+ * first_msdu is set.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only set
+ * when first_msdu is set.
+ *
+ * classification
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO so that the status can be written. This MPDU and the
+ * remaining packets in the PPDU will be filtered and no Ack response
+ * will be transmitted.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful search for the source MAC address
+ * due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful search for the destination MAC
+ * address due to the expiring of the search timer.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ * Indicates that the MPDU was prematurely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ */
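+
+/*
+ * Example (sketch): the msdu_done bit is the "everything here is valid"
+ * gate, so a receive path would typically check it before trusting any of
+ * the other rx descriptors:
+ *
+ *	struct rx_attention *attn = ...;
+ *
+ *	if (!(__le32_to_cpu(attn->flags) & RX_ATTENTION_FLAGS_MSDU_DONE))
+ *		return;
+ */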
+
+struct rx_frag_info {
+ u8 ring0_more_count;
+ u8 ring1_more_count;
+ u8 ring2_more_count;
+ u8 ring3_more_count;
+} __packed;
+
+/*
+ * ring0_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 0. Field is filled in by the RX_DMA.
+ *
+ * ring1_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 1. Field is filled in by the RX_DMA.
+ *
+ * ring2_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 2. Field is filled in by the RX_DMA.
+ *
+ * ring3_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 3. Field is filled in by the RX_DMA.
+ */
+
+enum htt_rx_mpdu_encrypt_type {
+ HTT_RX_MPDU_ENCRYPT_WEP40 = 0,
+ HTT_RX_MPDU_ENCRYPT_WEP104 = 1,
+ HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2,
+ HTT_RX_MPDU_ENCRYPT_WEP128 = 3,
+ HTT_RX_MPDU_ENCRYPT_TKIP_WPA = 4,
+ HTT_RX_MPDU_ENCRYPT_WAPI = 5,
+ HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
+ HTT_RX_MPDU_ENCRYPT_NONE = 7,
+};
+
+#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
+#define RX_MPDU_START_INFO0_PEER_IDX_LSB 0
+#define RX_MPDU_START_INFO0_SEQ_NUM_MASK 0x0fff0000
+#define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28
+#define RX_MPDU_START_INFO0_FROM_DS (1 << 11)
+#define RX_MPDU_START_INFO0_TO_DS (1 << 12)
+#define RX_MPDU_START_INFO0_ENCRYPTED (1 << 13)
+#define RX_MPDU_START_INFO0_RETRY (1 << 14)
+#define RX_MPDU_START_INFO0_TXBF_H_INFO (1 << 15)
+
+#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000
+#define RX_MPDU_START_INFO1_TID_LSB 28
+#define RX_MPDU_START_INFO1_DIRECTED (1 << 16)
+
+struct rx_mpdu_start {
+ __le32 info0;
+ union {
+ struct {
+ __le32 pn31_0;
+ __le32 info1; /* %RX_MPDU_START_INFO1_ */
+ } __packed;
+ struct {
+ u8 pn[6];
+ } __packed;
+ } __packed;
+} __packed;
+
+/*
+ * peer_idx
+ * The index of the address search table which associated with
+ * the peer table entry corresponding to this MPDU. Only valid
+ * when first_msdu is set.
+ *
+ * fr_ds
+ * Set if the from DS bit is set in the frame control. Only
+ * valid when first_msdu is set.
+ *
+ * to_ds
+ * Set if the to DS bit is set in the frame control. Only
+ * valid when first_msdu is set.
+ *
+ * encrypted
+ * Protected bit from the frame control. Only valid when
+ * first_msdu is set.
+ *
+ * retry
+ * Retry bit from the frame control. Only valid when
+ * first_msdu is set.
+ *
+ * txbf_h_info
+ * The MPDU data will contain H information. Primarily used
+ * for debug.
+ *
+ * seq_num
+ * The sequence number from the 802.11 header. Only valid when
+ * first_msdu is set.
+ *
+ * encrypt_type
+ * Indicates type of decrypt cipher used (as defined in the
+ * peer table)
+ * 0: WEP40
+ * 1: WEP104
+ * 2: TKIP without MIC
+ * 3: WEP128
+ * 4: TKIP (WPA)
+ * 5: WAPI
+ * 6: AES-CCM (WPA2)
+ * 7: No cipher
+ * Only valid when first_msdu is set
+ *
+ * pn_31_0
+ * Bits [31:0] of the PN number extracted from the IV field
+ * WEP: IV = {key_id_octet, pn2, pn1, pn0}. Only pn[23:0] is
+ * valid.
+ * TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0,
+ * WEPSeed[1], pn1}. Only pn[47:0] is valid.
+ * AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1,
+ * pn0}. Only pn[47:0] is valid.
+ * WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11,
+ * pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}.
+ * The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and
+ * pn[47:0] are valid.
+ * Only valid when first_msdu is set.
+ *
+ * pn_47_32
+ * Bits [47:32] of the PN number. See description for
+ * pn_31_0. The remaining PN fields are in the rx_msdu_end
+ * descriptor
+ *
+ * pn
+ * Use this field to access the pn without worrying about
+ * byte-order and bitmasking/bitshifting.
+ *
+ * directed
+ * See definition in RX attention descriptor
+ *
+ * reserved_2
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * tid
+ * The TID field in the QoS control field
+ */
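+
+/*
+ * Example (sketch): pulling the sequence number and encrypt type out of
+ * info0 with the masks above (a driver may wrap this in helper macros):
+ *
+ *	u32 info0 = __le32_to_cpu(mpdu_start->info0);
+ *	u16 seq_num = (info0 & RX_MPDU_START_INFO0_SEQ_NUM_MASK) >>
+ *		      RX_MPDU_START_INFO0_SEQ_NUM_LSB;
+ *	enum htt_rx_mpdu_encrypt_type enctype =
+ *		(info0 & RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK) >>
+ *		RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB;
+ */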
+
+#define RX_MPDU_END_INFO0_RESERVED_0_MASK 0x00001fff
+#define RX_MPDU_END_INFO0_RESERVED_0_LSB 0
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16
+#define RX_MPDU_END_INFO0_OVERFLOW_ERR (1 << 13)
+#define RX_MPDU_END_INFO0_LAST_MPDU (1 << 14)
+#define RX_MPDU_END_INFO0_POST_DELIM_ERR (1 << 15)
+#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR (1 << 28)
+#define RX_MPDU_END_INFO0_TKIP_MIC_ERR (1 << 29)
+#define RX_MPDU_END_INFO0_DECRYPT_ERR (1 << 30)
+#define RX_MPDU_END_INFO0_FCS_ERR (1 << 31)
+
+struct rx_mpdu_end {
+ __le32 info0;
+} __packed;
+
+/*
+ * reserved_0
+ * Reserved
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO so that the status can be written. This MPDU and the
+ * remaining packets in the PPDU will be filtered and no Ack response
+ * will be transmitted.
+ *
+ * last_mpdu
+ * Indicates that this is the last MPDU of a PPDU.
+ *
+ * post_delim_err
+ * Indicates that a delimiter FCS error occurred after this
+ * MPDU before the next MPDU. Only valid when last_msdu is
+ * set.
+ *
+ * post_delim_cnt
+ * Count of the delimiters after this MPDU. This requires the
+ * last MPDU to be held until all the EOF descriptors have been
+ * received. This may be inefficient in the future when
+ * ML-MIMO is used. Only valid when last_mpdu is set.
+ *
+ * mpdu_length_err
+ * See definition in RX attention descriptor
+ *
+ * tkip_mic_err
+ * See definition in RX attention descriptor
+ *
+ * decrypt_err
+ * See definition in RX attention descriptor
+ *
+ * fcs_err
+ * See definition in RX attention descriptor
+ */
+
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB 0
+#define RX_MSDU_START_INFO0_IP_OFFSET_MASK 0x000fc000
+#define RX_MSDU_START_INFO0_IP_OFFSET_LSB 14
+#define RX_MSDU_START_INFO0_RING_MASK_MASK 0x00f00000
+#define RX_MSDU_START_INFO0_RING_MASK_LSB 20
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB 24
+
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK 0x000000ff
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB 0
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK 0x00000300
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8
+#define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000
+#define RX_MSDU_START_INFO1_SA_IDX_LSB 16
+#define RX_MSDU_START_INFO1_IPV4_PROTO (1 << 10)
+#define RX_MSDU_START_INFO1_IPV6_PROTO (1 << 11)
+#define RX_MSDU_START_INFO1_TCP_PROTO (1 << 12)
+#define RX_MSDU_START_INFO1_UDP_PROTO (1 << 13)
+#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
+#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
+
+enum rx_msdu_decap_format {
+ RX_MSDU_DECAP_RAW = 0,
+ RX_MSDU_DECAP_NATIVE_WIFI = 1,
+ RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+ RX_MSDU_DECAP_8023_SNAP_LLC = 3
+};
+
+struct rx_msdu_start {
+ __le32 info0; /* %RX_MSDU_START_INFO0_ */
+ __le32 flow_id_crc;
+ __le32 info1; /* %RX_MSDU_START_INFO1_ */
+} __packed;
+
+/*
+ * msdu_length
+ * MSDU length in bytes after decapsulation. This field is
+ * still valid for MPDU frames without A-MSDU. It still
+ * represents MSDU length after decapsulation
+ *
+ * ip_offset
+ * Indicates the IP offset in bytes from the start of the
+ * packet after decapsulation. Only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ring_mask
+ * Indicates the destination RX rings for this MSDU.
+ *
+ * tcp_udp_offset
+ * Indicates the offset in bytes to the start of TCP or UDP
+ * header from the start of the IP header after decapsulation.
+ * Only valid if tcp_prot or udp_prot is set. The value 0
+ * indicates that the offset is longer than 127 bytes.
+ *
+ * reserved_0c
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * flow_id_crc
+ * The flow_id_crc runs CRC32 on the following information:
+ * IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0,
+ * protocol[7:0]}.
+ * IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0,
+ * next_header[7:0]}
+ * UDP case: src_port[15:0], dest_port[15:0]
+ * TCP case: src_port[15:0], dest_port[15:0],
+ * {header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
+ * {16'b0, urgent_ptr[15:0]}, all options except 32-bit
+ * timestamp.
+ *
+ * msdu_number
+ * Indicates the MSDU number within an MPDU. This value is
+ * reset to zero at the start of each MPDU. If the number of
+ * MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_format
+ * Indicates the format after decapsulation:
+ * 0: RAW: No decapsulation
+ * 1: Native WiFi
+ * 2: Ethernet 2 (DIX)
+ * 3: 802.3 (SNAP/LLC)
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP
+ * protocol indicates TCP.
+ *
+ * udp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP
+ * protocol indicates UDP.
+ *
+ * ip_frag
+ * Indicates that either the IP More frag bit is set or IP frag
+ * number is non-zero. If set indicates that this is a
+ * fragmented IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * sa_idx
+ * The offset in the address table which matches the MAC source
+ * address.
+ *
+ * reserved_2b
+ * Reserved: HW should fill with zero. FW should ignore.
+ */
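+
+/*
+ * Example (sketch): recovering the decapsulated MSDU length and the decap
+ * format from info0/info1 using the masks above:
+ *
+ *	u32 info0 = __le32_to_cpu(msdu_start->info0);
+ *	u32 info1 = __le32_to_cpu(msdu_start->info1);
+ *	u16 msdu_len = info0 & RX_MSDU_START_INFO0_MSDU_LENGTH_MASK;
+ *	enum rx_msdu_decap_format fmt =
+ *		(info1 & RX_MSDU_START_INFO1_DECAP_FORMAT_MASK) >>
+ *		RX_MSDU_START_INFO1_DECAP_FORMAT_LSB;
+ */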
+
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0
+#define RX_MSDU_END_INFO0_FIRST_MSDU (1 << 14)
+#define RX_MSDU_END_INFO0_LAST_MSDU (1 << 15)
+#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30)
+#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31)
+
+struct rx_msdu_end {
+ __le16 ip_hdr_cksum;
+ __le16 tcp_hdr_cksum;
+ u8 key_id_octet;
+ u8 classification_filter;
+ u8 wapi_pn[10];
+ __le32 info0;
+} __packed;
+
+/*
+ *ip_hdr_chksum
+ * This can include the IP header checksum or the pseudo header
+ * checksum used by TCP/UDP checksum.
+ *
+ *tcp_udp_chksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ *key_id_octet
+ * The key ID octet from the IV. Only valid when first_msdu is
+ * set.
+ *
+ *classification_filter
+ * Indicates the number of the classification filter rule
+ *
+ *ext_wapi_pn_63_48
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [63:48] (pn6 and pn7). The
+ * WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
+ * descriptor.
+ *
+ *ext_wapi_pn_95_64
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and
+ * pn11).
+ *
+ *ext_wapi_pn_127_96
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14,
+ * pn15).
+ *
+ *reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when
+ * first_msdu is set. This field is taken directly from the
+ * length field of the A-MPDU delimiter or the preamble length
+ * field for non-A-MPDU frames.
+ *
+ *first_msdu
+ * Indicates the first MSDU of A-MSDU. If both first_msdu and
+ * last_msdu are set in the MSDU then this is a non-aggregated
+ * MSDU frame: normal MPDU. Interior MSDUs in an A-MSDU shall
+ * have both first_msdu and last_msdu bits set to 0.
+ *
+ *last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is
+ * only valid when last_msdu is set.
+ *
+ *reserved_3a
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ *pre_delim_err
+ * Indicates that the first delimiter had a FCS failure. Only
+ * valid when first_mpdu and first_msdu are set.
+ *
+ *reserved_3b
+ * Reserved: HW should fill with zero. FW should ignore.
+ */
+
+#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
+#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1
+
+#define RX_PPDU_START_SIG_RATE_OFDM_48 0
+#define RX_PPDU_START_SIG_RATE_OFDM_24 1
+#define RX_PPDU_START_SIG_RATE_OFDM_12 2
+#define RX_PPDU_START_SIG_RATE_OFDM_6 3
+#define RX_PPDU_START_SIG_RATE_OFDM_54 4
+#define RX_PPDU_START_SIG_RATE_OFDM_36 5
+#define RX_PPDU_START_SIG_RATE_OFDM_18 6
+#define RX_PPDU_START_SIG_RATE_OFDM_9 7
+
+#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0
+#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
+#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2
+#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3
+#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4
+#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
+#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6
+
+#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
+#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
+#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
+#define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C
+#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D
+
+#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0)
+
+#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f
+#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK 0x0001ffe0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB 5
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK 0x00fc0000
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24
+#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT (1 << 4)
+#define RX_PPDU_START_INFO1_L_SIG_PARITY (1 << 17)
+
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0
+
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0
+#define RX_PPDU_START_INFO3_TXBF_H_INFO (1 << 24)
+
+#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff
+#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0
+
+#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
+#define RX_PPDU_START_INFO5_SERVICE_LSB 0
+
+struct rx_ppdu_start {
+ struct {
+ u8 pri20_mhz;
+ u8 ext20_mhz;
+ u8 ext40_mhz;
+ u8 ext80_mhz;
+ } rssi_chains[4];
+ u8 rssi_comb;
+ __le16 rsvd0;
+ u8 info0; /* %RX_PPDU_START_INFO0_ */
+ __le32 info1; /* %RX_PPDU_START_INFO1_ */
+ __le32 info2; /* %RX_PPDU_START_INFO2_ */
+ __le32 info3; /* %RX_PPDU_START_INFO3_ */
+ __le32 info4; /* %RX_PPDU_START_INFO4_ */
+ __le32 info5; /* %RX_PPDU_START_INFO5_ */
+} __packed;
+
+/*
+ * rssi_chain0_pri20
+ * RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec20
+ * RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec40
+ * RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec80
+ * RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_pri20
+ * RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec20
+ * RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec40
+ * RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec80
+ * RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_pri20
+ * RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec20
+ * RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec40
+ * RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec80
+ * RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_pri20
+ * RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec20
+ * RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec40
+ * RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec80
+ * RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_comb
+ * The combined RSSI of RX PPDU of all active chains and
+ * bandwidths. Value of 0x80 indicates invalid.
+ *
+ * reserved_4a
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * is_greenfield
+ * Do we really support this?
+ *
+ * reserved_4b
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * l_sig_rate
+ * If l_sig_rate_select is 0:
+ * 0x8: OFDM 48 Mbps
+ * 0x9: OFDM 24 Mbps
+ * 0xA: OFDM 12 Mbps
+ * 0xB: OFDM 6 Mbps
+ * 0xC: OFDM 54 Mbps
+ * 0xD: OFDM 36 Mbps
+ * 0xE: OFDM 18 Mbps
+ * 0xF: OFDM 9 Mbps
+ * If l_sig_rate_select is 1:
+ * 0x8: CCK 11 Mbps long preamble
+ * 0x9: CCK 5.5 Mbps long preamble
+ * 0xA: CCK 2 Mbps long preamble
+ * 0xB: CCK 1 Mbps long preamble
+ * 0xC: CCK 11 Mbps short preamble
+ * 0xD: CCK 5.5 Mbps short preamble
+ * 0xE: CCK 2 Mbps short preamble
+ *
+ * l_sig_rate_select
+ * Legacy signal rate select. If set then l_sig_rate indicates
+ * CCK rates. If clear then l_sig_rate indicates OFDM rates.
+ *
+ * l_sig_length
+ * Length of legacy frame in octets.
+ *
+ * l_sig_parity
+ * Odd parity over l_sig_rate and l_sig_length
+ *
+ * l_sig_tail
+ * Tail bits for Viterbi decoder
+ *
+ * preamble_type
+ * Indicates the type of preamble ahead:
+ * 0x4: Legacy (OFDM/CCK)
+ * 0x8: HT
+ * 0x9: HT with TxBF
+ * 0xC: VHT
+ * 0xD: VHT with TxBF
+ * 0x80 - 0xFF: Reserved for special baseband data types such
+ * as radar and spectral scan.
+ *
+ * ht_sig_vht_sig_a_1
+ * If preamble_type == 0x8 or 0x9
+ * HT-SIG (first 24 bits)
+ * If preamble_type == 0xC or 0xD
+ * VHT-SIG A (first 24 bits)
+ * Else
+ * Reserved
+ *
+ * reserved_6
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ht_sig_vht_sig_a_2
+ * If preamble_type == 0x8 or 0x9
+ * HT-SIG (last 24 bits)
+ * If preamble_type == 0xC or 0xD
+ * VHT-SIG A (last 24 bits)
+ * Else
+ * Reserved
+ *
+ * txbf_h_info
+ * Indicates that the packet data carries H information which
+ * is used for TxBF debug.
+ *
+ * reserved_7
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * vht_sig_b
+ * WiFi 1.0 and WiFi 2.0 will likely have this field set to all
+ * 0s since the BB does not plan on decoding VHT SIG-B.
+ *
+ * reserved_8
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * service
+ * Service field from BB for OFDM, HT and VHT packets. CCK
+ * packets will have service field of 0.
+ *
+ * reserved_9
+ * Reserved: HW should fill with 0, FW should ignore.
+*/
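+
+/*
+ * Example (sketch): rssi_comb uses 0x80 as the "invalid" marker, so a
+ * consumer would check for it before reporting signal strength:
+ *
+ *	if (ppdu_start->rssi_comb != 0x80)
+ *		rssi = ppdu_start->rssi_comb;
+ */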
+
+
+#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0)
+#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1)
+#define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2)
+
+#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff
+#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0
+#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
+#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
+
+#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+
+struct rx_ppdu_end {
+ __le32 evm_p0;
+ __le32 evm_p1;
+ __le32 evm_p2;
+ __le32 evm_p3;
+ __le32 evm_p4;
+ __le32 evm_p5;
+ __le32 evm_p6;
+ __le32 evm_p7;
+ __le32 evm_p8;
+ __le32 evm_p9;
+ __le32 evm_p10;
+ __le32 evm_p11;
+ __le32 evm_p12;
+ __le32 evm_p13;
+ __le32 evm_p14;
+ __le32 evm_p15;
+ __le32 tsf_timestamp;
+ __le32 wb_timestamp;
+ u8 locationing_timestamp;
+ u8 phy_err_code;
+ __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+ __le32 info0; /* %RX_PPDU_END_INFO0_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+/*
+ * evm_p0
+ * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p1
+ * EVM for pilot 1. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p2
+ * EVM for pilot 2. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p3
+ * EVM for pilot 3. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p4
+ * EVM for pilot 4. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p5
+ * EVM for pilot 5. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p6
+ * EVM for pilot 6. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p7
+ * EVM for pilot 7. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p8
+ * EVM for pilot 8. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p9
+ * EVM for pilot 9. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p10
+ * EVM for pilot 10. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p11
+ * EVM for pilot 11. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p12
+ * EVM for pilot 12. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p13
+ * EVM for pilot 13. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p14
+ * EVM for pilot 14. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p15
+ * EVM for pilot 15. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * tsf_timestamp
+ * Receive TSF timestamp sampled on the rising edge of
+ * rx_clear. For PHY errors this may be the current TSF when
+ * phy_error is asserted if the rx_clear does not assert before
+ * the end of the PHY error.
+ *
+ * wb_timestamp
+ * WLAN/BT timestamp is a 1 usec resolution timestamp which
+ * does not get updated based on receive beacon like TSF. The
+ * same rules for capturing tsf_timestamp are used to capture
+ * the wb_timestamp.
+ *
+ * locationing_timestamp
+ * Timestamp used for locationing. This timestamp is used to
+ * indicate fractions of usec. For example if the MAC clock is
+ * running at 80 MHz, the timestamp will increment every 12.5
+ * nsec. The value starts at 0 and increments to 79 and
+ * returns to 0 and repeats. This information is valid for
+ * every PPDU. This information can be used in conjunction
+ * with wb_timestamp to capture large delta times.
+ *
+ * phy_err_code
+ * See section 1.10.8.1.2 for the list of the PHY error codes.
+ *
+ * phy_err
+ * Indicates a PHY error was detected for this PPDU.
+ *
+ * rx_location
+ * Indicates that location information was requested.
+ *
+ * txbf_h_info
+ * Indicates that the packet data carries H information which
+ * is used for TxBF debug.
+ *
+ * reserved_18
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * rx_antenna
+ * Receive antenna value
+ *
+ * tx_ht_vht_ack
+ * Indicates that a HT or VHT Ack/BA frame was transmitted in
+ * response to this receive packet.
+ *
+ * bb_captured_channel
+ * Indicates that the BB has captured a channel dump. FW can
+ * then read the channel dump memory. This may indicate that
+ * the channel was captured either based on PCU setting the
+ * capture_channel bit BB descriptor or FW setting the
+ * capture_channel mode bit.
+ *
+ * reserved_19
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * bb_length
+ * Indicates the number of bytes of baseband information for
+ * PPDUs where the BB descriptor preamble type is 0x80 to 0xFF
+ * which indicates that this is not a normal PPDU but rather
+ * contains baseband debug information.
+ *
+ * reserved_20
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ppdu_done
+ * PPDU end status is only valid when ppdu_done bit is set.
+ * Every time HW sets this bit in memory, FW/SW must clear it
+ * again. FW will initialize all the ppdu_done dwords to 0.
+*/
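+
+/*
+ * Example (sketch): as described above, PPDU end status is gated on
+ * ppdu_done, which must be cleared again once it has been consumed:
+ *
+ *	if (__le16_to_cpu(ppdu_end->info1) & RX_PPDU_END_INFO1_PPDU_DONE) {
+ *		... process PPDU end status ...
+ *		ppdu_end->info1 = 0;
+ *	}
+ */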
+
+#define FW_RX_DESC_INFO0_DISCARD (1 << 0)
+#define FW_RX_DESC_INFO0_FORWARD (1 << 1)
+#define FW_RX_DESC_INFO0_INSPECT (1 << 5)
+#define FW_RX_DESC_INFO0_EXT_MASK 0xC0
+#define FW_RX_DESC_INFO0_EXT_LSB 6
+
+struct fw_rx_desc_base {
+ u8 info0;
+} __packed;
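+
+/*
+ * Example (sketch): the 2-bit EXT field is extracted from info0 with the
+ * mask/LSB pair above:
+ *
+ *	u8 ext = (desc->info0 & FW_RX_DESC_INFO0_EXT_MASK) >>
+ *		 FW_RX_DESC_INFO0_EXT_LSB;
+ */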
+
+#endif /* _RX_DESC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
new file mode 100644
index 000000000000..be7ba1e78afe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __TARGADDRS_H__
+#define __TARGADDRS_H__
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure. It must match the address of the _host_interest
+ * symbol (see linker script).
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end) across software releases.
+ *
+ * All addresses are available here so that it's possible to
+ * write a single binary that works with all Target Types.
+ * May be used in assembler code as well as C.
+ */
+#define QCA988X_HOST_INTEREST_ADDRESS 0x00400800
+#define HOST_INTEREST_MAX_SIZE 0x200
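+
+/*
+ * Example (sketch): the Target RAM address of a member of struct
+ * host_interest (defined below) can be derived from the base address,
+ * e.g. for hi_board_data:
+ *
+ *	u32 addr = QCA988X_HOST_INTEREST_ADDRESS +
+ *		   offsetof(struct host_interest, hi_board_data);
+ *
+ * which the Host could then read via BMI or the diagnostic window.
+ */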
+
+/*
+ * These are items that the Host may need to access via BMI or via the
+ * Diagnostic Window. The position of items in this structure must remain
+ * constant across firmware revisions! Types for each item must be fixed
+ * size across target and host platforms. More items may be added at the end.
+ */
+struct host_interest {
+ /*
+ * Pointer to application-defined area, if any.
+ * Set by Target application during startup.
+ */
+ u32 hi_app_host_interest; /* 0x00 */
+
+ /* Pointer to register dump area, valid after Target crash. */
+ u32 hi_failure_state; /* 0x04 */
+
+ /* Pointer to debug logging header */
+ u32 hi_dbglog_hdr; /* 0x08 */
+
+ u32 hi_unused0c; /* 0x0c */
+
+ /*
+ * General-purpose flag bits, similar to SOC_OPTION_* flags.
+ * Can be used by application rather than by OS.
+ */
+ u32 hi_option_flag; /* 0x10 */
+
+ /*
+ * Boolean that determines whether or not to
+ * display messages on the serial port.
+ */
+ u32 hi_serial_enable; /* 0x14 */
+
+ /* Start address of DataSet index, if any */
+ u32 hi_dset_list_head; /* 0x18 */
+
+ /* Override Target application start address */
+ u32 hi_app_start; /* 0x1c */
+
+ /* Clock and voltage tuning */
+ u32 hi_skip_clock_init; /* 0x20 */
+ u32 hi_core_clock_setting; /* 0x24 */
+ u32 hi_cpu_clock_setting; /* 0x28 */
+ u32 hi_system_sleep_setting; /* 0x2c */
+ u32 hi_xtal_control_setting; /* 0x30 */
+ u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
+ u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
+ u32 hi_ref_voltage_trim_setting; /* 0x3c */
+ u32 hi_clock_info; /* 0x40 */
+
+ /* Host uses BE CPU or not */
+ u32 hi_be; /* 0x44 */
+
+ u32 hi_stack; /* normal stack */ /* 0x48 */
+ u32 hi_err_stack; /* error stack */ /* 0x4c */
+ u32 hi_desired_cpu_speed_hz; /* 0x50 */
+
+ /* Pointer to Board Data */
+ u32 hi_board_data; /* 0x54 */
+
+ /*
+ * Indication of Board Data state:
+ * 0: board data is not yet initialized.
+ * 1: board data is initialized; unknown size
+ * >1: number of bytes of initialized board data
+ */
+ u32 hi_board_data_initialized; /* 0x58 */
+
+ u32 hi_dset_ram_index_table; /* 0x5c */
+
+ u32 hi_desired_baud_rate; /* 0x60 */
+ u32 hi_dbglog_config; /* 0x64 */
+ u32 hi_end_ram_reserve_sz; /* 0x68 */
+ u32 hi_mbox_io_block_sz; /* 0x6c */
+
+ u32 hi_num_bpatch_streams; /* 0x70 -- unused */
+ u32 hi_mbox_isr_yield_limit; /* 0x74 */
+
+ u32 hi_refclk_hz; /* 0x78 */
+ u32 hi_ext_clk_detected; /* 0x7c */
+ u32 hi_dbg_uart_txpin; /* 0x80 */
+ u32 hi_dbg_uart_rxpin; /* 0x84 */
+ u32 hi_hci_uart_baud; /* 0x88 */
+ u32 hi_hci_uart_pin_assignments; /* 0x8C */
+
+ u32 hi_hci_uart_baud_scale_val; /* 0x90 */
+ u32 hi_hci_uart_baud_step_val; /* 0x94 */
+
+ u32 hi_allocram_start; /* 0x98 */
+ u32 hi_allocram_sz; /* 0x9c */
+ u32 hi_hci_bridge_flags; /* 0xa0 */
+ u32 hi_hci_uart_support_pins; /* 0xa4 */
+
+ u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
+
+ /*
+ * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+ * [31:16]: wakeup timeout in ms
+ */
+ /* Pointer to extended board Data */
+ u32 hi_board_ext_data; /* 0xac */
+ u32 hi_board_ext_data_config; /* 0xb0 */
+ /*
+	 * Bit [0]: valid
+	 * Bit [31:16]: size
+ */
+ /*
+	 * hi_reset_flag controls what is preserved across a target reset,
+	 * such as restoring app_start after a warm reset, or preserving
+	 * the host interest area, ROM data, literals, etc.
+ */
+ u32 hi_reset_flag; /* 0xb4 */
+ /* indicate hi_reset_flag is valid */
+ u32 hi_reset_flag_valid; /* 0xb8 */
+ u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
+ /* 0xbc - [31:0]: idle timeout in ms */
+ /* ACS flags */
+ u32 hi_acs_flags; /* 0xc0 */
+ u32 hi_console_flags; /* 0xc4 */
+ u32 hi_nvram_state; /* 0xc8 */
+ u32 hi_option_flag2; /* 0xcc */
+
+ /* If non-zero, override values sent to Host in WMI_READY event. */
+ u32 hi_sw_version_override; /* 0xd0 */
+ u32 hi_abi_version_override; /* 0xd4 */
+
+ /*
+ * Percentage of high priority RX traffic to total expected RX traffic
+ * applicable only to ar6004
+ */
+ u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
+
+ /* test applications flags */
+ u32 hi_test_apps_related; /* 0xdc */
+ /* location of test script */
+ u32 hi_ota_testscript; /* 0xe0 */
+ /* location of CAL data */
+ u32 hi_cal_data; /* 0xe4 */
+
+ /* Number of packet log buffers */
+ u32 hi_pktlog_num_buffers; /* 0xe8 */
+
+ /* wow extension configuration */
+ u32 hi_wow_ext_config; /* 0xec */
+ u32 hi_pwr_save_flags; /* 0xf0 */
+
+ /* Spatial Multiplexing Power Save (SMPS) options */
+ u32 hi_smps_options; /* 0xf4 */
+
+ /* Interconnect-specific state */
+ u32 hi_interconnect_state; /* 0xf8 */
+
+ /* Coex configuration flags */
+ u32 hi_coex_config; /* 0xfc */
+
+ /* Early allocation support */
+ u32 hi_early_alloc; /* 0x100 */
+ /* FW swap field */
+ /*
+ * Bits of this 32bit word will be used to pass specific swap
+ * instruction to FW
+ */
+ /*
+	 * Bit 0 -- AP Nart descriptor no swap. When this bit is set,
+	 * FW will not swap the TX descriptor, meaning packets are
+	 * formed on the target processor.
+ */
+ /* Bit 1 - unused */
+ u32 hi_fw_swap; /* 0x104 */
+} __packed;
+
+#define HI_ITEM(item) offsetof(struct host_interest, item)
+
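+/*
+ * Illustrative sketch only (the helper below is hypothetical): the Target
+ * RAM address of a host_interest member is the per-target host interest
+ * base address plus the member offset; the host can then read or write
+ * that address over BMI or the diagnostic window.
+ */
+static inline u32 qca988x_hi_item_addr(u32 item_offset)
+{
+	/* e.g. item_offset = HI_ITEM(hi_board_data) */
+	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
+}
+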
+/* Bits defined in hi_option_flag */
+
+/* Enable timer workaround */
+#define HI_OPTION_TIMER_WAR 0x01
+/* Limit BMI command credits */
+#define HI_OPTION_BMI_CRED_LIMIT 0x02
+/* Relay Dot11 hdr to/from host */
+#define HI_OPTION_RELAY_DOT11_HDR 0x04
+/* MAC addr method 0-locally administered 1-globally unique addrs */
+#define HI_OPTION_MAC_ADDR_METHOD 0x08
+/* Firmware Bridging */
+#define HI_OPTION_FW_BRIDGE 0x10
+/* Enable CPU profiling */
+#define HI_OPTION_ENABLE_PROFILE 0x20
+/* Disable debug logging */
+#define HI_OPTION_DISABLE_DBGLOG 0x40
+/* Skip Era Tracking */
+#define HI_OPTION_SKIP_ERA_TRACKING 0x80
+/* Disable PAPRD (debug) */
+#define HI_OPTION_PAPRD_DISABLE 0x100
+#define HI_OPTION_NUM_DEV_LSB 0x200
+#define HI_OPTION_NUM_DEV_MSB 0x800
+#define HI_OPTION_DEV_MODE_LSB 0x1000
+#define HI_OPTION_DEV_MODE_MSB 0x8000000
+/* Disable LowFreq Timer Stabilization */
+#define HI_OPTION_NO_LFT_STBL 0x10000000
+/* Skip regulatory scan */
+#define HI_OPTION_SKIP_REG_SCAN 0x20000000
+/*
+ * Do regulatory scan during init before
+ * sending WMI ready event to host
+ */
+#define HI_OPTION_INIT_REG_SCAN 0x40000000
+
+/* REV6: Do not adjust memory map */
+#define HI_OPTION_SKIP_MEMMAP 0x80000000
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+/* 2 bits of hi_option_flag are used to represent 4 modes */
+#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
+#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
+#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
+#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
+
+/* 2 bits of hi_option_flag are used to represent 4 submodes */
+#define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */
+#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
+#define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */
+
+/* Num dev Mask */
+#define HI_OPTION_NUM_DEV_MASK 0x7
+#define HI_OPTION_NUM_DEV_SHIFT 0x9
+
+/* firmware bridging */
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/*
+Fw Mode/SubMode Mask
+|-----------------------------------------------------------------------------|
+| SUB | SUB | SUB | SUB | | | | |
+|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
+| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |
+|-----------------------------------------------------------------------------|
+*/
+#define HI_OPTION_FW_MODE_BITS 0x2
+#define HI_OPTION_FW_MODE_MASK 0x3
+#define HI_OPTION_FW_MODE_SHIFT 0xC
+#define HI_OPTION_ALL_FW_MODE_MASK 0xFF
+
+#define HI_OPTION_FW_SUBMODE_BITS 0x2
+#define HI_OPTION_FW_SUBMODE_MASK 0x3
+#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
+#define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
+#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
+
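+/*
+ * Illustrative sketch only (hypothetical helper): packing the per-device
+ * firmware mode and submode into hi_option_flag using the width/shift
+ * definitions above (two mode bits and two submode bits per device).
+ */
+static inline u32 hi_option_fw_mode_pack(u32 dev, u32 mode, u32 submode)
+{
+	u32 flags = 0;
+
+	flags |= (mode & HI_OPTION_FW_MODE_MASK) <<
+		 (HI_OPTION_FW_MODE_SHIFT + dev * HI_OPTION_FW_MODE_BITS);
+	flags |= (submode & HI_OPTION_FW_SUBMODE_MASK) <<
+		 (HI_OPTION_FW_SUBMODE_SHIFT + dev * HI_OPTION_FW_SUBMODE_BITS);
+
+	return flags;
+}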
+
+/* hi_option_flag2 options */
+#define HI_OPTION_OFFLOAD_AMSDU 0x01
+#define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
+#define HI_OPTION_ENABLE_RFKILL 0x04 /* RFKill Enable Feature*/
+#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
+#define HI_OPTION_EARLY_CFG_DONE 0x10 /* Early configuration is complete */
+
+#define HI_OPTION_RF_KILL_SHIFT 0x2
+#define HI_OPTION_RF_KILL_MASK 0x1
+
+/* hi_reset_flag */
+/* preserve App Start address */
+#define HI_RESET_FLAG_PRESERVE_APP_START 0x01
+/* preserve host interest */
+#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
+/* preserve ROM data */
+#define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04
+#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08
+#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10
+#define HI_RESET_FLAG_WARM_RESET 0x20
+
+/* define hi_fw_swap bits */
+#define HI_DESC_IN_FW_BIT 0x01
+
+/* indicate the reset flag is valid */
+#define HI_RESET_FLAG_IS_VALID 0x12345678
+
+/* ACS is enabled */
+#define HI_ACS_FLAGS_ENABLED (1 << 0)
+/* Use physical WWAN device */
+#define HI_ACS_FLAGS_USE_WWAN (1 << 1)
+/* Use test VAP */
+#define HI_ACS_FLAGS_TEST_VAP (1 << 2)
+
+/*
+ * CONSOLE FLAGS
+ *
+ * Bit Range Meaning
+ * --------- --------------------------------
+ * 2..0 UART ID (0 = Default)
+ * 3 Baud Select (0 = 9600, 1 = 115200)
+ * 30..4 Reserved
+ * 31 Enable Console
+ *
+ */
+
+#define HI_CONSOLE_FLAGS_ENABLE (1 << 31)
+#define HI_CONSOLE_FLAGS_UART_MASK (0x7)
+#define HI_CONSOLE_FLAGS_UART_SHIFT 0
+#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3)
+
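+/*
+ * Illustrative sketch only (hypothetical helper): a hi_console_flags value
+ * enabling the firmware console on UART 0 at 115200 baud, following the
+ * bit layout documented above.
+ */
+static inline u32 hi_console_flags_example(void)
+{
+	return HI_CONSOLE_FLAGS_ENABLE |
+	       HI_CONSOLE_FLAGS_BAUD_SELECT |		/* 115200 */
+	       (0 << HI_CONSOLE_FLAGS_UART_SHIFT);	/* UART 0 */
+}
+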
+/* SM power save options */
+#define HI_SMPS_ALLOW_MASK (0x00000001)
+#define HI_SMPS_MODE_MASK (0x00000002)
+#define HI_SMPS_MODE_STATIC (0x00000000)
+#define HI_SMPS_MODE_DYNAMIC (0x00000002)
+#define HI_SMPS_DISABLE_AUTO_MODE (0x00000004)
+#define HI_SMPS_DATA_THRESH_MASK (0x000007f8)
+#define HI_SMPS_DATA_THRESH_SHIFT (3)
+#define HI_SMPS_RSSI_THRESH_MASK (0x0007f800)
+#define HI_SMPS_RSSI_THRESH_SHIFT (11)
+#define HI_SMPS_LOWPWR_CM_MASK (0x00380000)
+#define HI_SMPS_LOWPWR_CM_SHIFT (15)
+#define HI_SMPS_HIPWR_CM_MASK (0x03c00000)
+#define HI_SMPS_HIPWR_CM_SHIFT (19)
+
+/*
+ * WOW Extension configuration
+ *
+ * Bit Range Meaning
+ * --------- --------------------------------
+ * 8..0 Size of each WOW pattern (max 511)
+ * 15..9 Number of patterns per list (max 127)
+ * 17..16 Number of lists (max 4)
+ * 30..18 Reserved
+ * 31 Enabled
+ *
+ * set values (except enable) to zeros for default settings
+ */
+
+#define HI_WOW_EXT_ENABLED_MASK (1 << 31)
+#define HI_WOW_EXT_NUM_LIST_SHIFT 16
+#define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9
+#define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0
+#define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
+#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
+ ((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
+ HI_WOW_EXT_NUM_LIST_MASK) | \
+ (((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
+ HI_WOW_EXT_NUM_PATTERNS_MASK) | \
+ (((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
+ HI_WOW_EXT_PATTERN_SIZE_MASK))
+
+#define HI_WOW_EXT_GET_NUM_LISTS(config) \
+ (((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
+ (((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
+ HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
+ (((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
+ HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
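+/*
+ * Illustrative sketch only (hypothetical helper, arbitrary values):
+ * composing a hi_wow_ext_config word for 2 pattern lists of 16 patterns
+ * each, 256 bytes per pattern. The GET_* macros above recover the same
+ * fields from the packed word.
+ */
+static inline u32 hi_wow_ext_config_example(void)
+{
+	return HI_WOW_EXT_ENABLED_MASK |
+	       HI_WOW_EXT_MAKE_CONFIG(2, 16, 256);
+}
+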
+/*
+ * Early allocation configuration
+ * Supports RAM bank configuration before BMI is done, which eases memory
+ * allocation at a very early stage
+ * Bit Range Meaning
+ * --------- ----------------------------------
+ * [0:3]	number of banks assigned to be IRAM
+ * [4:15] reserved
+ * [16:31] magic number
+ *
+ * Note:
+ * 1. target firmware checks the magic number and, if it matches, considers
+ *    bits [0:15] valid and uses them to calculate the end of DRAM. Early
+ *    allocation is then located in that area and may be reclaimed when
+ *    necessary
+ * 2. if no magic number is found, early allocation happens at the "_end"
+ *    symbol of ROM, which is located before the app-data and might NOT be
+ *    re-claimable. If this is adopted, the linker script should keep this
+ *    in mind to avoid data corruption.
+ */
+#define HI_EARLY_ALLOC_MAGIC 0x6d8a
+#define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000
+#define HI_EARLY_ALLOC_MAGIC_SHIFT 16
+#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
+#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0
+
+#define HI_EARLY_ALLOC_VALID() \
+ ((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
+ HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
+#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
+ (((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
+ >> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
+
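+/*
+ * Illustrative sketch only (hypothetical helper): a hi_early_alloc word
+ * that the target firmware would accept carries the magic number in the
+ * upper 16 bits and the IRAM bank count in the low nibble, e.g.
+ * 0x6d8a0002 for two IRAM banks.
+ */
+static inline u32 hi_early_alloc_example(void)
+{
+	return (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) |
+	       (2 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT);
+}
+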
+/* power save flag bit definitions */
+#define HI_PWR_SAVE_LPL_ENABLED 0x1
+/* b1-b3 reserved */
+/* b4-b5 : dev0 LPL type : 0 - none
+ *	    1 - Reduce Pwr Search
+ *	    2 - Reduce Pwr Listen */
+/* b6-b7 : dev1 LPL type and so on for Max 8 devices */
+#define HI_PWR_SAVE_LPL_DEV0_LSB 4
+#define HI_PWR_SAVE_LPL_DEV_MASK 0x3
+/* power save related utility macros */
+#define HI_LPL_ENABLED() \
+ ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
+#define HI_DEV_LPL_TYPE_GET(_devix) \
+ (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
+ (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
+
+#define HOST_INTEREST_SMPS_IS_ALLOWED() \
+ ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
+
+/* Reserve 1024 bytes for extended board data */
+#define QCA988X_BOARD_DATA_SZ 7168
+#define QCA988X_BOARD_EXT_DATA_SZ 0
+
+#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
new file mode 100644
index 000000000000..4a31e2c6fbd4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
new file mode 100644
index 000000000000..85e806bf7257
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH10K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH10K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath10k
+
+#define ATH10K_MSG_MAX 200
+
+DECLARE_EVENT_CLASS(ath10k_log_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ ),
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH10K_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH10K_MSG_MAX);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+TRACE_EVENT(ath10k_log_dbg,
+ TP_PROTO(unsigned int level, struct va_format *vaf),
+ TP_ARGS(level, vaf),
+ TP_STRUCT__entry(
+ __field(unsigned int, level)
+ __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->level = level;
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH10K_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH10K_MSG_MAX);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(ath10k_log_dbg_dump,
+ TP_PROTO(const char *msg, const char *prefix,
+ const void *buf, size_t buf_len),
+
+ TP_ARGS(msg, prefix, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ __string(prefix, prefix)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ __assign_str(prefix, prefix);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s/%s\n", __get_str(prefix), __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_cmd,
+ TP_PROTO(int id, void *buf, size_t buf_len),
+
+ TP_ARGS(id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "id %d len %zu",
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_event,
+ TP_PROTO(int id, void *buf, size_t buf_len),
+
+ TP_ARGS(id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "id %d len %zu",
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
new file mode 100644
index 000000000000..68b6faefd1d8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "txrx.h"
+#include "htt.h"
+#include "mac.h"
+#include "debug.h"
+
+static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
+ return;
+
+ /* If the original wait_for_completion() timed out before
+ * {data,mgmt}_tx_completed() was called then we could complete
+ * offchan_tx_completed for a different skb. Prevent this by using
+ * offchan_tx_skb. */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->offchan_tx_skb != skb) {
+ ath10k_warn("completed old offchannel frame\n");
+ goto out;
+ }
+
+ complete(&ar->offchan_tx_completed);
+ ar->offchan_tx_skb = NULL; /* just for sanity */
+
+ ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+out:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+{
+ struct device *dev = htt->ar->dev;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
+ struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
+ int ret;
+
+ if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
+ return;
+
+ ATH10K_SKB_CB(txdesc)->htt.refcount--;
+
+ if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+ return;
+
+ if (txfrag) {
+ ret = ath10k_skb_unmap(dev, txfrag);
+ if (ret)
+ ath10k_warn("txfrag unmap failed (%d)\n", ret);
+
+ dev_kfree_skb_any(txfrag);
+ }
+
+ ret = ath10k_skb_unmap(dev, msdu);
+ if (ret)
+ ath10k_warn("data skb unmap failed (%d)\n", ret);
+
+ ath10k_report_offchan_tx(htt->ar, msdu);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (ATH10K_SKB_CB(txdesc)->htt.discard) {
+ ieee80211_free_txskb(htt->ar->hw, msdu);
+ goto exit;
+ }
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status(htt->ar->hw, msdu);
+ /* we do not own the msdu anymore */
+
+exit:
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
+ __ath10k_htt_tx_dec_pending(htt);
+ if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
+ wake_up(&htt->empty_tx_wq);
+ spin_unlock_bh(&htt->tx_lock);
+
+ dev_kfree_skb_any(txdesc);
+}
+
+void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
+{
+ struct sk_buff *txdesc;
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+ tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+
+ if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+ ath10k_warn("warning: msdu_id %d too big, ignoring\n",
+ tx_done->msdu_id);
+ return;
+ }
+
+ txdesc = htt->pending_tx[tx_done->msdu_id];
+
+ ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
+ ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
+
+ ath10k_txrx_tx_unref(htt, txdesc);
+}
+
+static const u8 rx_legacy_rate_idx[] = {
+ 3, /* 0x00 - 11Mbps */
+ 2, /* 0x01 - 5.5Mbps */
+ 1, /* 0x02 - 2Mbps */
+ 0, /* 0x03 - 1Mbps */
+ 3, /* 0x04 - 11Mbps */
+ 2, /* 0x05 - 5.5Mbps */
+ 1, /* 0x06 - 2Mbps */
+ 0, /* 0x07 - 1Mbps */
+ 10, /* 0x08 - 48Mbps */
+ 8, /* 0x09 - 24Mbps */
+ 6, /* 0x0A - 12Mbps */
+ 4, /* 0x0B - 6Mbps */
+ 11, /* 0x0C - 54Mbps */
+ 9, /* 0x0D - 36Mbps */
+ 7, /* 0x0E - 18Mbps */
+ 5, /* 0x0F - 9Mbps */
+};
+
+static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
+ enum ieee80211_band band,
+ struct ieee80211_rx_status *status)
+{
+ u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+ u8 info0 = info->rate.info0;
+ u32 info1 = info->rate.info1;
+ u32 info2 = info->rate.info2;
+ u8 preamble = 0;
+
+ /* Check if valid fields */
+ if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+ return;
+
+ preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+
+ switch (preamble) {
+ case HTT_RX_LEGACY:
+ cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+ rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+ rate_idx = 0;
+
+ if (rate < 0x08 || rate > 0x0F)
+ break;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ if (cck)
+ rate &= ~BIT(3);
+ rate_idx = rx_legacy_rate_idx[rate];
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate_idx = rx_legacy_rate_idx[rate];
+			/* We use the same rate table that we registered
+			   with the HW - ath10k_rates[]. For 5GHz the
+			   CCK rates are skipped, hence the -4 here */
+ rate_idx -= 4;
+ break;
+ default:
+ break;
+ }
+
+ status->rate_idx = rate_idx;
+ break;
+ case HTT_RX_HT:
+ case HTT_RX_HT_WITH_TXBF:
+ /* HT-SIG - Table 20-11 in info1 and info2 */
+ mcs = info1 & 0x1F;
+ nss = mcs >> 3;
+ bw = (info1 >> 7) & 1;
+ sgi = (info2 >> 7) & 1;
+
+ status->rate_idx = mcs;
+ status->flag |= RX_FLAG_HT;
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+ if (bw)
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ case HTT_RX_VHT:
+ case HTT_RX_VHT_WITH_TXBF:
+		/* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2
+ TODO check this */
+ mcs = (info2 >> 4) & 0x0F;
+ nss = (info1 >> 10) & 0x07;
+ bw = info1 & 3;
+ sgi = info2 & 1;
+
+ status->rate_idx = mcs;
+ status->vht_nss = nss;
+
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+
+ switch (bw) {
+ /* 20MHZ */
+ case 0:
+ break;
+ /* 40MHZ */
+ case 1:
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ /* 80MHZ */
+ case 2:
+ status->flag |= RX_FLAG_80MHZ;
+ }
+
+ status->flag |= RX_FLAG_VHT;
+ break;
+ default:
+ break;
+ }
+}
+
+void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
+{
+ struct ieee80211_rx_status *status;
+ struct ieee80211_channel *ch;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
+
+ status = IEEE80211_SKB_RXCB(info->skb);
+ memset(status, 0, sizeof(*status));
+
+ if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+ status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(
+ __le16_to_cpu(hdr->frame_control) &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+
+ if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (info->fcs_err)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ status->signal = info->signal;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch) {
+ ath10k_warn("no channel configured; ignoring frame!\n");
+ dev_kfree_skb_any(info->skb);
+ return;
+ }
+
+ process_rx_rates(ar, info, ch->band, status);
+ status->band = ch->band;
+ status->freq = ch->center_freq;
+
+ ath10k_dbg(ATH10K_DBG_DATA,
+ "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
+ info->skb,
+ info->skb->len,
+ status->flag == 0 ? "legacy" : "",
+ status->flag & RX_FLAG_HT ? "ht" : "",
+ status->flag & RX_FLAG_VHT ? "vht" : "",
+ status->flag & RX_FLAG_40MHZ ? "40" : "",
+ status->flag & RX_FLAG_80MHZ ? "80" : "",
+ status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->vht_nss,
+ status->freq,
+ status->band);
+
+ ieee80211_rx(ar->hw, info->skb);
+}
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+ const u8 *addr)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(peer, &ar->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (memcmp(peer->addr, addr, ETH_ALEN))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
+ int peer_id)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(peer, &ar->peers, list)
+ if (test_bit(peer_id, peer->peer_ids))
+ return peer;
+
+ return NULL;
+}
+
+static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ int ret;
+
+ ret = wait_event_timeout(ar->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ar->data_lock);
+ mapped = !!ath10k_peer_find(ar, vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ mapped == expect_mapped;
+ }), 3*HZ);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+ return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
+}
+
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+ return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
+}
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+ struct htt_peer_map_event *ev)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = ev->vdev_id;
+ memcpy(peer->addr, ev->addr, ETH_ALEN);
+ list_add(&peer->list, &ar->peers);
+ wake_up(&ar->peer_mapping_wq);
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ ev->vdev_id, ev->addr, ev->peer_id);
+
+ set_bit(ev->peer_id, peer->peer_ids);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+ struct htt_peer_unmap_event *ev)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, ev->peer_id);
+ if (!peer) {
+ ath10k_warn("unknown peer id %d\n", ev->peer_id);
+ goto exit;
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, ev->peer_id);
+
+ clear_bit(ev->peer_id, peer->peer_ids);
+
+ if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ar->peer_mapping_wq);
+ }
+
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
new file mode 100644
index 000000000000..e78632a76df7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include "htt.h"
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
+void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
+void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+ struct htt_peer_map_event *ev);
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+ struct htt_peer_unmap_event *ev);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
new file mode 100644
index 000000000000..7d4b7987422d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -0,0 +1,2081 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "core.h"
+#include "htc.h"
+#include "debug.h"
+#include "wmi.h"
+#include "mac.h"
+
+void ath10k_wmi_flush_tx(struct ath10k *ar)
+{
+ int ret;
+
+ ret = wait_event_timeout(ar->wmi.wq,
+ atomic_read(&ar->wmi.pending_tx_count) == 0,
+ 5*HZ);
+ if (atomic_read(&ar->wmi.pending_tx_count) == 0)
+ return;
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ if (ret < 0)
+ ath10k_warn("wmi flush failed (%d)\n", ret);
+}
+
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+{
+ int ret;
+ ret = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ return ret;
+}
+
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+{
+ int ret;
+ ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
+ WMI_UNIFIED_READY_TIMEOUT_HZ);
+ return ret;
+}
+
+static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
+{
+ struct sk_buff *skb;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn("Unaligned WMI skb\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
+
+static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+
+ if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
+ wake_up(&ar->wmi.wq);
+}
+
+/* WMI command API */
+static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct wmi_cmd_hdr *cmd_hdr;
+ int status;
+ u32 cmd = 0;
+
+ if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return -ENOMEM;
+
+ cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = __cpu_to_le32(cmd);
+
+ if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
+ WMI_MAX_PENDING_TX_COUNT) {
+ /* avoid using up memory when FW hangs */
+ atomic_dec(&ar->wmi.pending_tx_count);
+ return -EBUSY;
+ }
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
+
+ status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
+ if (status) {
+ dev_kfree_skb_any(skb);
+ atomic_dec(&ar->wmi.pending_tx_count);
+ return status;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
+ enum wmi_scan_event_type event_type;
+ enum wmi_scan_completion_reason reason;
+ u32 freq;
+ u32 req_id;
+ u32 scan_id;
+ u32 vdev_id;
+
+ event_type = __le32_to_cpu(event->event_type);
+ reason = __le32_to_cpu(event->reason);
+ freq = __le32_to_cpu(event->channel_freq);
+ req_id = __le32_to_cpu(event->scan_req_id);
+ scan_id = __le32_to_cpu(event->scan_id);
+ vdev_id = __le32_to_cpu(event->vdev_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "scan event type %d reason %d freq %d req_id %d "
+ "scan_id %d vdev_id %d\n",
+ event_type, reason, freq, req_id, scan_id, vdev_id);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (event_type) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
+ if (ar->scan.in_progress && ar->scan.is_roc)
+ ieee80211_ready_on_channel(ar->hw);
+
+ complete(&ar->scan.started);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
+ break;
+ case WMI_SCAN_REASON_CANCELLED:
+ ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
+ break;
+ case WMI_SCAN_REASON_PREEMPTED:
+ ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
+ break;
+ case WMI_SCAN_REASON_TIMEDOUT:
+ ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
+ break;
+ default:
+ break;
+ }
+
+ ar->scan_channel = NULL;
+ if (!ar->scan.in_progress) {
+ ath10k_warn("no scan requested, ignoring\n");
+ break;
+ }
+
+ if (ar->scan.is_roc) {
+ ath10k_offchan_tx_purge(ar);
+
+ if (!ar->scan.aborting)
+ ieee80211_remain_on_channel_expired(ar->hw);
+ } else {
+ ieee80211_scan_completed(ar->hw, ar->scan.aborting);
+ }
+
+ del_timer(&ar->scan.timeout);
+ complete_all(&ar->scan.completed);
+ ar->scan.in_progress = false;
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
+ ar->scan_channel = NULL;
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+ ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ if (ar->scan.in_progress && ar->scan.is_roc &&
+ ar->scan.roc_freq == freq) {
+ complete(&ar->scan.on_channel);
+ }
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
+ break;
+ case WMI_SCAN_EVENT_PREEMPTED:
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+ return 0;
+}
+
+static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
+{
+ enum ieee80211_band band;
+
+ switch (phy_mode) {
+ case MODE_11A:
+ case MODE_11NA_HT20:
+ case MODE_11NA_HT40:
+ case MODE_11AC_VHT20:
+ case MODE_11AC_VHT40:
+ case MODE_11AC_VHT80:
+ band = IEEE80211_BAND_5GHZ;
+ break;
+ case MODE_11G:
+ case MODE_11B:
+ case MODE_11GONLY:
+ case MODE_11NG_HT20:
+ case MODE_11NG_HT40:
+ case MODE_11AC_VHT20_2G:
+ case MODE_11AC_VHT40_2G:
+ case MODE_11AC_VHT80_2G:
+ default:
+ band = IEEE80211_BAND_2GHZ;
+ }
+
+ return band;
+}
+
+static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
+{
+ u8 rate_idx = 0;
+
+ /* rate in Kbps */
+ switch (rate) {
+ case 1000:
+ rate_idx = 0;
+ break;
+ case 2000:
+ rate_idx = 1;
+ break;
+ case 5500:
+ rate_idx = 2;
+ break;
+ case 11000:
+ rate_idx = 3;
+ break;
+ case 6000:
+ rate_idx = 4;
+ break;
+ case 9000:
+ rate_idx = 5;
+ break;
+ case 12000:
+ rate_idx = 6;
+ break;
+ case 18000:
+ rate_idx = 7;
+ break;
+ case 24000:
+ rate_idx = 8;
+ break;
+ case 36000:
+ rate_idx = 9;
+ break;
+ case 48000:
+ rate_idx = 10;
+ break;
+ case 54000:
+ rate_idx = 11;
+ break;
+ default:
+ break;
+ }
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ if (rate_idx > 3)
+ /* Omit CCK rates */
+ rate_idx -= 4;
+ else
+ rate_idx = 0;
+ }
+
+ return rate_idx;
+}
+
+static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ u32 rx_status;
+ u32 channel;
+ u32 phy_mode;
+ u32 snr;
+ u32 rate;
+ u32 buf_len;
+ u16 fc;
+
+ channel = __le32_to_cpu(event->hdr.channel);
+ buf_len = __le32_to_cpu(event->hdr.buf_len);
+ rx_status = __le32_to_cpu(event->hdr.status);
+ snr = __le32_to_cpu(event->hdr.snr);
+ phy_mode = __le32_to_cpu(event->hdr.phy_mode);
+ rate = __le32_to_cpu(event->hdr.rate);
+
+ memset(status, 0, sizeof(*status));
+
+ ath10k_dbg(ATH10K_DBG_MGMT,
+ "event mgmt rx status %08x\n", rx_status);
+
+ if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (rx_status & WMI_RX_STATUS_ERR_CRC)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ status->band = phy_mode_to_band(phy_mode);
+ status->freq = ieee80211_channel_to_frequency(channel, status->band);
+ status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = get_rate_idx(rate, status->band);
+
+ skb_pull(skb, sizeof(event->hdr));
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ if (fc & IEEE80211_FCTL_PROTECTED) {
+ status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+
+ ath10k_dbg(ATH10K_DBG_MGMT,
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath10k_dbg(ATH10K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ /*
+	 * packets from HTC come aligned to 4-byte boundaries
+	 * because they may originally arrive along with a trailer
+ */
+ skb_trim(skb, buf_len);
+
+ ieee80211_rx(ar->hw, skb);
+ return 0;
+}
+
+static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
+}
+
+static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+}
+
+static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
+}
+
+static void ath10k_wmi_event_update_stats(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
+
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
+
+ ath10k_debug_read_target_stats(ar, ev);
+}
+
+static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_vdev_start_response_event *ev;
+
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+
+ ev = (struct wmi_vdev_start_response_event *)skb->data;
+
+ if (WARN_ON(__le32_to_cpu(ev->status)))
+ return;
+
+ complete(&ar->vdev_setup_done);
+}
+
+static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
+ complete(&ar->vdev_setup_done);
+}
+
+static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
+}
+
+/*
+ * FIXME
+ *
+ * We don't report the sleep state of connected stations to
+ * mac80211. Due to this, mac80211 can't fill in the TIM IE
+ * correctly.
+ *
+ * I know of no way of getting nullfunc frames that contain
+ * sleep transition from connected stations - these do not
+ * seem to be sent from the target to the host. There also
+ * doesn't seem to be a dedicated event for that. So the
+ * only way left to do this would be to read tim_bitmap
+ * during SWBA.
+ *
+ * We could probably try using tim_bitmap from SWBA to tell
+ * mac80211 which stations are asleep and which are not. The
+ * problem here is calling mac80211 functions so many times
+ * could take too long and make us miss the time to submit
+ * the beacon to the target.
+ *
+ * So as a workaround we try to extend the TIM IE if there
+ * is unicast traffic buffered for stations with aid > 7,
+ * and fill it in ourselves.
+ */
+static void ath10k_wmi_update_tim(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct sk_buff *bcn,
+ struct wmi_bcn_info *bcn_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
+ struct ieee80211_tim_ie *tim;
+ u8 *ies, *ie;
+ u8 ie_len, pvm_len;
+
+	/* If the next SWBA has tim_changed unset, the tim_bitmap is garbage;
+	 * we must copy the bitmap upon change and reuse it later */
+ if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
+ int i;
+
+ BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
+ sizeof(bcn_info->tim_info.tim_bitmap));
+
+ for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+ __le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
+ u32 v = __le32_to_cpu(t);
+ arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
+ }
+
+ /* FW reports either length 0 or 16
+ * so we calculate this on our own */
+ arvif->u.ap.tim_len = 0;
+ for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+ if (arvif->u.ap.tim_bitmap[i])
+ arvif->u.ap.tim_len = i;
+
+ arvif->u.ap.tim_len++;
+ }
+
+ ies = bcn->data;
+ ies += ieee80211_hdrlen(hdr->frame_control);
+ ies += 12; /* fixed parameters */
+
+ ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
+ (u8 *)skb_tail_pointer(bcn) - ies);
+ if (!ie) {
+ /* highly unlikely for mac80211 */
+		ath10k_warn("no tim ie found\n");
+ return;
+ }
+
+ tim = (void *)ie + 2;
+ ie_len = ie[1];
+ pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
+
+ if (pvm_len < arvif->u.ap.tim_len) {
+ int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+ int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
+ void *next_ie = ie + 2 + ie_len;
+
+ if (skb_put(bcn, expand_size)) {
+ memmove(next_ie + expand_size, next_ie, move_size);
+
+ ie[1] += expand_size;
+ ie_len += expand_size;
+ pvm_len += expand_size;
+ } else {
+ ath10k_warn("tim expansion failed\n");
+ }
+ }
+
+ if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+ ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
+ return;
+ }
+
+ tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
+ memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
+
+ ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
+ tim->dtim_count, tim->dtim_period,
+ tim->bitmap_ctrl, pvm_len);
+}
+
+static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
+ struct wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow_oppps = noa->ctwindow_oppps;
+ u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+ bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = noa->num_descriptors;
+ int i;
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = noa->index;
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
+{
+ u32 len = 0;
+ u8 noa_descriptors = noa->num_descriptors;
+ u8 opp_ps_info = noa->ctwindow_oppps;
+ bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
+
+
+ if (!noa_descriptors && !opps_enabled)
+ return len;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
+static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
+ struct sk_buff *bcn,
+ struct wmi_bcn_info *bcn_info)
+{
+ struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
+ u8 *new_data, *old_data = arvif->u.ap.noa_data;
+ u32 new_len;
+
+ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return;
+
+ ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
+ if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
+ new_len = ath10k_p2p_calc_noa_ie_len(noa);
+ if (!new_len)
+ goto cleanup;
+
+ new_data = kmalloc(new_len, GFP_ATOMIC);
+ if (!new_data)
+ goto cleanup;
+
+ ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
+
+ spin_lock_bh(&ar->data_lock);
+ arvif->u.ap.noa_data = new_data;
+ arvif->u.ap.noa_len = new_len;
+ spin_unlock_bh(&ar->data_lock);
+ kfree(old_data);
+ }
+
+ if (arvif->u.ap.noa_data)
+ if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
+ memcpy(skb_put(bcn, arvif->u.ap.noa_len),
+ arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+ return;
+
+cleanup:
+ spin_lock_bh(&ar->data_lock);
+ arvif->u.ap.noa_data = NULL;
+ arvif->u.ap.noa_len = 0;
+ spin_unlock_bh(&ar->data_lock);
+ kfree(old_data);
+}
+
+
+static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_host_swba_event *ev;
+ u32 map;
+ int i = -1;
+ struct wmi_bcn_info *bcn_info;
+ struct ath10k_vif *arvif;
+ struct wmi_bcn_tx_arg arg;
+ struct sk_buff *bcn;
+ int vdev_id = 0;
+ int ret;
+
+ ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
+
+ ev = (struct wmi_host_swba_event *)skb->data;
+ map = __le32_to_cpu(ev->vdev_map);
+
+ ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
+ "-vdev map 0x%x\n",
+ ev->vdev_map);
+
+ for (; map; map >>= 1, vdev_id++) {
+ if (!(map & 0x1))
+ continue;
+
+ i++;
+
+ if (i >= WMI_MAX_AP_VDEV) {
+ ath10k_warn("swba has corrupted vdev map\n");
+ break;
+ }
+
+ bcn_info = &ev->bcn_info[i];
+
+ ath10k_dbg(ATH10K_DBG_MGMT,
+ "-bcn_info[%d]:\n"
+ "--tim_len %d\n"
+ "--tim_mcast %d\n"
+ "--tim_changed %d\n"
+ "--tim_num_ps_pending %d\n"
+ "--tim_bitmap 0x%08x%08x%08x%08x\n",
+ i,
+ __le32_to_cpu(bcn_info->tim_info.tim_len),
+ __le32_to_cpu(bcn_info->tim_info.tim_mcast),
+ __le32_to_cpu(bcn_info->tim_info.tim_changed),
+ __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
+ __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
+ __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
+ __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
+ __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif == NULL) {
+ ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
+ continue;
+ }
+
+ bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+ if (!bcn) {
+ ath10k_warn("could not get mac80211 beacon\n");
+ continue;
+ }
+
+ ath10k_tx_h_seq_no(bcn);
+ ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
+ ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.tx_rate = 0;
+ arg.tx_power = 0;
+ arg.bcn = bcn->data;
+ arg.bcn_len = bcn->len;
+
+ ret = ath10k_wmi_beacon_send(ar, &arg);
+ if (ret)
+ ath10k_warn("could not send beacon (%d)\n", ret);
+
+ dev_kfree_skb_any(bcn);
+ }
+}
+
+static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
+}
+
+static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+}
+
+static void ath10k_wmi_event_profile_match(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
+}
+
+static void ath10k_wmi_event_debug_print(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
+}
+
+static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+}
+
+static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
+}
+
+static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
+}
+
+static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
+}
+
+static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
+}
+
+static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
+}
+
+static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_service_ready_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev)) {
+ ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+ skb->len, sizeof(*ev));
+ return;
+ }
+
+ ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+ ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+ ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+ ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+ ar->fw_version_major =
+ (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+ ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+ ar->fw_version_release =
+ (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
+ ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
+ ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+
+ ar->ath_common.regulatory.current_rd =
+ __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+
+ ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+ sizeof(ev->wmi_service_bitmap));
+
+ if (strlen(ar->hw->wiphy->fw_version) == 0) {
+ snprintf(ar->hw->wiphy->fw_version,
+ sizeof(ar->hw->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ ar->fw_version_major,
+ ar->fw_version_minor,
+ ar->fw_version_release,
+ ar->fw_version_build);
+ }
+
+ /* FIXME: it probably should be better to support this */
+ if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
+ ath10k_warn("target requested %d memory chunks; ignoring\n",
+ __le32_to_cpu(ev->num_mem_reqs));
+ }
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
+ __le32_to_cpu(ev->sw_version),
+ __le32_to_cpu(ev->sw_version_1),
+ __le32_to_cpu(ev->abi_version),
+ __le32_to_cpu(ev->phy_capability),
+ __le32_to_cpu(ev->ht_cap_info),
+ __le32_to_cpu(ev->vht_cap_info),
+ __le32_to_cpu(ev->vht_supp_mcs),
+ __le32_to_cpu(ev->sys_cap_info),
+ __le32_to_cpu(ev->num_mem_reqs));
+
+ complete(&ar->wmi.service_ready);
+}
+
+static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
+
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EINVAL;
+
+ memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+ __le32_to_cpu(ev->sw_version),
+ __le32_to_cpu(ev->abi_version),
+ ev->mac_addr.addr,
+ __le32_to_cpu(ev->status));
+
+ complete(&ar->wmi.unified_ready);
+ return 0;
+}
+
+static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_event_id id;
+ u16 len;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ len = skb->len;
+
+ trace_ath10k_wmi_event(id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_PDEV_FTM_INTG_EVENTID:
+ ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath10k_wmi_event_gtk_offload_status(ar, skb);
+ break;
+ case WMI_GTK_REKEY_FAIL_EVENTID:
+ ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+ break;
+ case WMI_TX_DELBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_delba_complete(ar, skb);
+ break;
+ case WMI_TX_ADDBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_addba_complete(ar, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+ break;
+ case WMI_SERVICE_READY_EVENTID:
+ ath10k_wmi_service_ready_event_rx(ar, skb);
+ break;
+ case WMI_READY_EVENTID:
+ ath10k_wmi_ready_event_rx(ar, skb);
+ break;
+ default:
+ ath10k_warn("Unknown eventid: %d\n", id);
+ break;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_event_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ wmi.wmi_event_work);
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi.wmi_event_list);
+ if (!skb)
+ break;
+
+ ath10k_wmi_event_process(ar, skb);
+ }
+}
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ enum wmi_event_id event_id;
+
+ event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ /* some events must be handled as soon as possible and thus
+  * can't be deferred to a worker thread */
+ switch (event_id) {
+ case WMI_HOST_SWBA_EVENTID:
+ case WMI_MGMT_RX_EVENTID:
+ ath10k_wmi_event_process(ar, skb);
+ return;
+ default:
+ break;
+ }
+
+ skb_queue_tail(&ar->wmi.wmi_event_list, skb);
+ queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
+}
+
+/* WMI Initialization functions */
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+ init_completion(&ar->wmi.service_ready);
+ init_completion(&ar->wmi.unified_ready);
+ init_waitqueue_head(&ar->wmi.wq);
+
+ skb_queue_head_init(&ar->wmi.wmi_event_list);
+ INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
+
+ return 0;
+}
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+ /* HTC should've drained the packets already */
+ if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
+ ath10k_warn("there are still pending packets\n");
+
+ cancel_work_sync(&ar->wmi.wmi_event_work);
+ skb_queue_purge(&ar->wmi.wmi_event_list);
+}
+
+int ath10k_wmi_connect_htc_service(struct ath10k *ar)
+{
+ int status;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+
+ status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ar->wmi.eid = conn_resp.eid;
+ return 0;
+}
+
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+ u16 rd5g, u16 ctl2g, u16 ctl5g)
+{
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+}
+
+int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
+ const struct wmi_channel_arg *arg)
+{
+ struct wmi_set_channel_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (arg->passive)
+ return -EINVAL;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_channel_cmd *)skb->data;
+ cmd->chan.mhz = __cpu_to_le32(arg->freq);
+ cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
+ cmd->chan.mode = arg->mode;
+ cmd->chan.min_power = arg->min_power;
+ cmd->chan.max_power = arg->max_power;
+ cmd->chan.reg_power = arg->max_reg_power;
+ cmd->chan.reg_classid = arg->reg_class_id;
+ cmd->chan.antenna_max = arg->max_antenna_gain;
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi set channel mode %d freq %d\n",
+ arg->mode, arg->freq);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
+}
+
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
+{
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+ cmd->suspend_opt = WMI_PDEV_SUSPEND;
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
+}
+
+int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(0);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
+}
+
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
+ u32 value)
+{
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->param_id = __cpu_to_le32(id);
+ cmd->param_value = __cpu_to_le32(value);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
+ id, value);
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
+}
+
+int ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config config = {};
+ u32 val;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
+ config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
+
+ config.num_offload_reorder_bufs =
+ __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
+
+ config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
+
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
+
+ val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
+
+ config.gtk_offload_max_vdev =
+ __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
+
+ buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!buf)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd *)buf->data;
+ cmd->num_host_mem_chunks = 0;
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
+ return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
+}
+
+static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
+{
+ int len;
+
+ len = sizeof(struct wmi_start_scan_cmd);
+
+ if (arg->ie_len) {
+ if (!arg->ie)
+ return -EINVAL;
+ if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+ return -EINVAL;
+
+ len += sizeof(struct wmi_ie_data);
+ len += roundup(arg->ie_len, 4);
+ }
+
+ if (arg->n_channels) {
+ if (!arg->channels)
+ return -EINVAL;
+ if (arg->n_channels > ARRAY_SIZE(arg->channels))
+ return -EINVAL;
+
+ len += sizeof(struct wmi_chan_list);
+ len += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+ if (!arg->ssids)
+ return -EINVAL;
+ if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+ return -EINVAL;
+
+ len += sizeof(struct wmi_ssid_list);
+ len += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+ if (!arg->bssids)
+ return -EINVAL;
+ if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+ return -EINVAL;
+
+ len += sizeof(struct wmi_bssid_list);
+ len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+
+ return len;
+}
+
+int ath10k_wmi_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_start_scan_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_ie_data *ie;
+ struct wmi_chan_list *channels;
+ struct wmi_ssid_list *ssids;
+ struct wmi_bssid_list *bssids;
+ u32 scan_id;
+ u32 scan_req_id;
+ int off;
+ int len = 0;
+ int i;
+
+ len = ath10k_wmi_start_scan_calc_len(arg);
+ if (len < 0)
+ return len; /* len contains error code here */
+
+ skb = ath10k_wmi_alloc_skb(len);
+ if (!skb)
+ return -ENOMEM;
+
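+ /* host-assigned ids are ORed with a host prefix, presumably so the
+  * target can tell them apart from ids it generates itself */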
+ scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
+ scan_id |= arg->scan_id;
+
+ scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+ scan_req_id |= arg->scan_req_id;
+
+ cmd = (struct wmi_start_scan_cmd *)skb->data;
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(scan_req_id);
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+ cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
+ cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+ cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
+ cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
+ cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
+ cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+ cmd->idle_time = __cpu_to_le32(arg->idle_time);
+ cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
+ cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
+ cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
+
+ /* TLV list starts after fields included in the struct */
+ off = sizeof(*cmd);
+
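+ /* optional TLVs are appended below in this order: channel list,
+  * ssid list, bssid list, ie data */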
+ if (arg->n_channels) {
+ channels = (void *)skb->data + off;
+ channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
+ channels->num_chan = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++)
+ channels->channel_list[i] =
+ __cpu_to_le32(arg->channels[i]);
+
+ off += sizeof(*channels);
+ off += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+ ssids = (void *)skb->data + off;
+ ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
+ ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
+
+ for (i = 0; i < arg->n_ssids; i++) {
+ ssids->ssids[i].ssid_len =
+ __cpu_to_le32(arg->ssids[i].len);
+ memcpy(&ssids->ssids[i].ssid,
+ arg->ssids[i].ssid,
+ arg->ssids[i].len);
+ }
+
+ off += sizeof(*ssids);
+ off += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+ bssids = (void *)skb->data + off;
+ bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
+ bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
+
+ for (i = 0; i < arg->n_bssids; i++)
+ memcpy(&bssids->bssid_list[i],
+ arg->bssids[i].bssid,
+ ETH_ALEN);
+
+ off += sizeof(*bssids);
+ off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+
+ if (arg->ie_len) {
+ ie = (void *)skb->data + off;
+ ie->tag = __cpu_to_le32(WMI_IE_TAG);
+ ie->ie_len = __cpu_to_le32(arg->ie_len);
+ memcpy(ie->ie_data, arg->ie, arg->ie_len);
+
+ off += sizeof(*ie);
+ off += roundup(arg->ie_len, 4);
+ }
+
+ if (off != skb->len) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
+ return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
+}
+
+void ath10k_wmi_start_scan_init(struct ath10k *ar,
+ struct wmi_start_scan_arg *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_passive = 150;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 5000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
+ | WMI_SCAN_EVENT_COMPLETED
+ | WMI_SCAN_EVENT_BSS_CHANNEL
+ | WMI_SCAN_EVENT_FOREIGN_CHANNEL
+ | WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+ arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->n_bssids = 1;
+ arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
+}
+
+int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+{
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ u32 scan_id;
+ u32 req_id;
+
+ if (arg->req_id > 0xFFF)
+ return -EINVAL;
+ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+ return -EINVAL;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ scan_id = arg->u.scan_id;
+ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+ req_id = arg->req_id;
+ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+ cmd->req_type = __cpu_to_le32(arg->req_type);
+ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(req_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
+ arg->req_id, arg->req_type, arg->u.scan_id);
+ return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
+}
+
+int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_type = __cpu_to_le32(type);
+ cmd->vdev_subtype = __cpu_to_le32(subtype);
+ memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
+ vdev_id, type, subtype, macaddr);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
+}
+
+int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "WMI vdev delete id %d\n", vdev_id);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
+}
+
+static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ enum wmi_cmd_id cmd_id)
+{
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ const char *cmdname;
+ u32 flags = 0;
+
+ if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
+ cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
+ return -EINVAL;
+ if (WARN_ON(arg->ssid && arg->ssid_len == 0))
+ return -EINVAL;
+ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+ return -EINVAL;
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return -EINVAL;
+
+ if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
+ cmdname = "start";
+ else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
+ cmdname = "restart";
+ else
+ return -EINVAL; /* should not happen, we already check cmd_id */
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
+ cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
+ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
+ cmd->flags = __cpu_to_le32(flags);
+ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
+ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
+
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+
+ cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
+
+ cmd->chan.band_center_freq1 =
+ __cpu_to_le32(arg->channel.band_center_freq1);
+
+ cmd->chan.mode = arg->channel.mode;
+ cmd->chan.min_power = arg->channel.min_power;
+ cmd->chan.max_power = arg->channel.max_power;
+ cmd->chan.reg_power = arg->channel.max_reg_power;
+ cmd->chan.reg_classid = arg->channel.reg_class_id;
+ cmd->chan.antenna_max = arg->channel.max_antenna_gain;
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X,"
+ "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq,
+ arg->channel.mode, flags, arg->channel.max_power);
+
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+int ath10k_wmi_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ return ath10k_wmi_vdev_start_restart(ar, arg,
+ WMI_VDEV_START_REQUEST_CMDID);
+}
+
+int ath10k_wmi_vdev_restart(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ return ath10k_wmi_vdev_start_restart(ar, arg,
+ WMI_VDEV_RESTART_REQUEST_CMDID);
+}
+
+int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
+}
+
+int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = __cpu_to_le32(aid);
+ memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
+}
+
+int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi mgmt vdev down id 0x%x\n", vdev_id);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
+}
+
+int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_param param_id, u32 param_value)
+{
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
+}
+
+int ath10k_wmi_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+ return -EINVAL;
+ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+ return -EINVAL;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->key_idx = __cpu_to_le32(arg->key_idx);
+ cmd->key_flags = __cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
+ cmd->key_len = __cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->macaddr)
+ memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
+ if (arg->key_data)
+ memcpy(cmd->key_data, arg->key_data, arg->key_len);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+}
+
+int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi peer create vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
+}
+
+int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
+}
+
+int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+ memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
+ vdev_id, peer_addr, tid_bitmap);
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+}
+
+int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr, enum wmi_peer_param param_id,
+ u32 param_value)
+{
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+ memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_value);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
+}
+
+int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct wmi_sta_powersave_mode_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi set powersave id 0x%x mode %d\n",
+ vdev_id, psmode);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+}
+
+int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value)
+{
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi sta ps param vdev_id 0x%x param %d value %d\n",
+ vdev_id, param_id, value);
+ return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+}
+
+int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return -EINVAL;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+ memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
+ vdev_id, param_id, value, mac);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+}
+
+int ath10k_wmi_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel_arg *ch;
+ struct wmi_channel *ci;
+ int len;
+ int i;
+
+ len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
+
+ skb = ath10k_wmi_alloc_skb(len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++) {
+ u32 flags = 0;
+
+ ch = &arg->channels[i];
+ ci = &cmd->chan_info[i];
+
+ if (ch->passive)
+ flags |= WMI_CHAN_FLAG_PASSIVE;
+ if (ch->allow_ibss)
+ flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+ if (ch->allow_ht)
+ flags |= WMI_CHAN_FLAG_ALLOW_HT;
+ if (ch->allow_vht)
+ flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+ if (ch->ht40plus)
+ flags |= WMI_CHAN_FLAG_HT40_PLUS;
+
+ ci->mhz = __cpu_to_le32(ch->freq);
+ ci->band_center_freq1 = __cpu_to_le32(ch->freq);
+ ci->band_center_freq2 = 0;
+ ci->min_power = ch->min_power;
+ ci->max_power = ch->max_power;
+ ci->reg_power = ch->max_reg_power;
+ ci->antenna_max = 0;
+
+ /* mode & flags share storage */
+ ci->mode = ch->mode;
+ ci->flags |= __cpu_to_le32(flags);
+ }
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
+}
+
+int ath10k_wmi_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_peer_assoc_complete_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (arg->peer_mpdu_density > 16)
+ return -EINVAL;
+ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+ return -EINVAL;
+ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+ return -EINVAL;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+ cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
+ cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
+ cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
+ cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+ cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
+ cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
+ cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
+ cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
+ cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
+ cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+ cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
+
+ memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);
+
+ cmd->peer_legacy_rates.num_rates =
+ __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ cmd->peer_ht_rates.num_rates =
+ __cpu_to_le32(arg->peer_ht_rates.num_rates);
+ memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ cmd->peer_vht_rates.rx_max_rate =
+ __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+ cmd->peer_vht_rates.rx_mcs_set =
+ __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+ cmd->peer_vht_rates.tx_max_rate =
+ __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+ cmd->peer_vht_rates.tx_mcs_set =
+ __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
+}
+
+int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
+{
+ struct wmi_bcn_tx_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_tx_cmd *)skb->data;
+ cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate);
+ cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
+ cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
+ memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
+
+ return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
+}
+
+static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
+ const struct wmi_wmm_params_arg *arg)
+{
+ params->cwmin = __cpu_to_le32(arg->cwmin);
+ params->cwmax = __cpu_to_le32(arg->cwmax);
+ params->aifs = __cpu_to_le32(arg->aifs);
+ params->txop = __cpu_to_le32(arg->txop);
+ params->acm = __cpu_to_le32(arg->acm);
+ params->no_ack = __cpu_to_le32(arg->no_ack);
+}
+
+int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+ const struct wmi_pdev_set_wmm_params_arg *arg)
+{
+ struct wmi_pdev_set_wmm_params *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
+ ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
+ ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
+ ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
+ ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
+ return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
+}
+
+int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+{
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->stats_id = __cpu_to_le32(stats_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
+ return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
new file mode 100644
index 000000000000..9555f5a0e041
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -0,0 +1,3052 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_H_
+#define _WMI_H_
+
+#include <linux/types.h>
+#include <net/mac80211.h>
+
+/*
+ * This file specifies the WMI interface for the Unified Software
+ * Architecture.
+ *
+ * It includes definitions of all the commands and events. Commands are
+ * messages from the host to the target. Events and Replies are messages
+ * from the target to the host.
+ *
+ * Ownership of correctness with regard to WMI commands belongs to the host
+ * driver; the target is not required to validate parameters for value or
+ * proper range, or to perform any other checking.
+ *
+ * Guidelines for extending this interface are below.
+ *
+ * 1. Add new WMI commands ONLY within the specified range - 0x9000 - 0x9fff
+ *
+ * 2. Use ONLY u32 type for defining member variables within WMI
+ * command/event structures. Do not use u8, u16, bool or
+ * enum types within these structures.
+ *
+ * 3. DO NOT define bit fields within structures. Implement bit fields
+ * using masks if necessary. Do not use the programming language's bit
+ * field definition.
+ *
+ * 4. Define macros for encode/decode of u8, u16 fields within
+ * the u32 variables. Use these macros for set/get of these fields.
+ * Try to use this to optimize the structure without bloating it with
+ * u32 variables for every lower sized field.
+ *
+ * 5. Do not use PACK/UNPACK attributes for the structures as each member
+ * variable is already 4-byte aligned by virtue of being a u32
+ * type.
+ *
+ * 6. Comment each parameter part of the WMI command/event structure by
+ * using two stars at the beginning of the C comment instead of one star to
+ * enable HTML document generation using Doxygen.
+ *
+ */
+
+/* Control Path */
+struct wmi_cmd_hdr {
+ __le32 cmd_id;
+} __packed;
+
+#define WMI_CMD_HDR_CMD_ID_MASK 0x00FFFFFF
+#define WMI_CMD_HDR_CMD_ID_LSB 0
+#define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000
+#define WMI_CMD_HDR_PLT_PRIV_LSB 24
+
+#define HTC_PROTOCOL_VERSION 0x0002
+#define WMI_PROTOCOL_VERSION 0x0002
+
+enum wmi_service_id {
+ WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */
+ WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */
+ WMI_SERVICE_ROAM_OFFLOAD, /* roam offload */
+ WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */
+ WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */
+ WMI_SERVICE_AP_UAPSD, /* uapsd on AP */
+ WMI_SERVICE_AP_DFS, /* DFS on AP */
+ WMI_SERVICE_11AC, /* supports 11ac */
+ WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/
+ WMI_SERVICE_PHYERR, /* PHY error */
+ WMI_SERVICE_BCN_FILTER, /* Beacon filter support */
+ WMI_SERVICE_RTT, /* RTT (round trip time) support */
+ WMI_SERVICE_RATECTRL, /* Rate-control */
+ WMI_SERVICE_WOW, /* WOW Support */
+ WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */
+ WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */
+ WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support */
+ WMI_SERVICE_NLO, /* Network list offload service */
+ WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */
+ WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */
+ WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */
+ WMI_SERVICE_CHATTER, /* Chatter service */
+ WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */
+ WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */
+ WMI_SERVICE_FORCE_FW_HANG, /* To test fw recovery mechanism */
+ WMI_SERVICE_GPIO, /* GPIO service */
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* UAPSD AC Trigger Generation */
+ WMI_STA_UAPSD_VAR_AUTO_TRIG, /* -do- */
+ WMI_SERVICE_STA_KEEP_ALIVE, /* STA keep alive mechanism support */
+ WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */
+
+ WMI_SERVICE_LAST,
+ WMI_MAX_SERVICE = 64 /* max service */
+};
+
+static inline const char *wmi_service_name(int service_id)
+{
+ switch (service_id) {
+ case WMI_SERVICE_BEACON_OFFLOAD:
+ return "BEACON_OFFLOAD";
+ case WMI_SERVICE_SCAN_OFFLOAD:
+ return "SCAN_OFFLOAD";
+ case WMI_SERVICE_ROAM_OFFLOAD:
+ return "ROAM_OFFLOAD";
+ case WMI_SERVICE_BCN_MISS_OFFLOAD:
+ return "BCN_MISS_OFFLOAD";
+ case WMI_SERVICE_STA_PWRSAVE:
+ return "STA_PWRSAVE";
+ case WMI_SERVICE_STA_ADVANCED_PWRSAVE:
+ return "STA_ADVANCED_PWRSAVE";
+ case WMI_SERVICE_AP_UAPSD:
+ return "AP_UAPSD";
+ case WMI_SERVICE_AP_DFS:
+ return "AP_DFS";
+ case WMI_SERVICE_11AC:
+ return "11AC";
+ case WMI_SERVICE_BLOCKACK:
+ return "BLOCKACK";
+ case WMI_SERVICE_PHYERR:
+ return "PHYERR";
+ case WMI_SERVICE_BCN_FILTER:
+ return "BCN_FILTER";
+ case WMI_SERVICE_RTT:
+ return "RTT";
+ case WMI_SERVICE_RATECTRL:
+ return "RATECTRL";
+ case WMI_SERVICE_WOW:
+ return "WOW";
+ case WMI_SERVICE_RATECTRL_CACHE:
+ return "RATECTRL CACHE";
+ case WMI_SERVICE_IRAM_TIDS:
+ return "IRAM TIDS";
+ case WMI_SERVICE_ARPNS_OFFLOAD:
+ return "ARPNS_OFFLOAD";
+ case WMI_SERVICE_NLO:
+ return "NLO";
+ case WMI_SERVICE_GTK_OFFLOAD:
+ return "GTK_OFFLOAD";
+ case WMI_SERVICE_SCAN_SCH:
+ return "SCAN_SCH";
+ case WMI_SERVICE_CSA_OFFLOAD:
+ return "CSA_OFFLOAD";
+ case WMI_SERVICE_CHATTER:
+ return "CHATTER";
+ case WMI_SERVICE_COEX_FREQAVOID:
+ return "COEX_FREQAVOID";
+ case WMI_SERVICE_PACKET_POWER_SAVE:
+ return "PACKET_POWER_SAVE";
+ case WMI_SERVICE_FORCE_FW_HANG:
+ return "FORCE FW HANG";
+ case WMI_SERVICE_GPIO:
+ return "GPIO";
+ case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM:
+ return "MODULATED DTIM";
+ case WMI_STA_UAPSD_BASIC_AUTO_TRIG:
+ return "BASIC UAPSD";
+ case WMI_STA_UAPSD_VAR_AUTO_TRIG:
+ return "VAR UAPSD";
+ case WMI_SERVICE_STA_KEEP_ALIVE:
+ return "STA KEEP ALIVE";
+ case WMI_SERVICE_TX_ENCAP:
+ return "TX ENCAP";
+ default:
+ return "UNKNOWN SERVICE\n";
+ }
+}
+
+
+#define WMI_SERVICE_BM_SIZE \
+ ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32))
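+/* with WMI_MAX_SERVICE = 64 and sizeof(u32) = 4 this evaluates to 16 */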
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+ union {
+ u8 addr[6];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
+
+/* macro to convert MAC address from WMI word format to char array */
+#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
+ (c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \
+ (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
+ (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
+ (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
+ (c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \
+ (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
+ } while (0)
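+
+/*
+ * usage sketch (variable names are illustrative only):
+ *   struct wmi_mac_addr *wmi_addr = ...;
+ *   u8 addr[ETH_ALEN];
+ *   WMI_MAC_ADDR_TO_CHAR_ARRAY(wmi_addr, addr);
+ * octets 0-3 are taken from word0 and octets 4-5 from word1.
+ */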
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV,
+ WMI_GRP_VDEV,
+ WMI_GRP_PEER,
+ WMI_GRP_MGMT,
+ WMI_GRP_BA_NEG,
+ WMI_GRP_STA_PS,
+ WMI_GRP_DFS,
+ WMI_GRP_ROAM,
+ WMI_GRP_OFL_SCAN,
+ WMI_GRP_P2P,
+ WMI_GRP_AP_PS,
+ WMI_GRP_RATE_CTRL,
+ WMI_GRP_PROFILE,
+ WMI_GRP_SUSPEND,
+ WMI_GRP_BCN_FILTER,
+ WMI_GRP_WOW,
+ WMI_GRP_RTT,
+ WMI_GRP_SPECTRAL,
+ WMI_GRP_STATS,
+ WMI_GRP_ARP_NS_OFL,
+ WMI_GRP_NLO_OFL,
+ WMI_GRP_GTK_OFL,
+ WMI_GRP_CSA_OFL,
+ WMI_GRP_CHATTER,
+ WMI_GRP_TID_ADDBA,
+ WMI_GRP_MISC,
+ WMI_GRP_GPIO,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
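+
+/*
+ * e.g. WMI_CMD_GRP(WMI_GRP_SCAN) == (0x3 << 12) | 0x1 == 0x3001; later
+ * command ids within the same group simply increment from that value.
+ */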
+
+/* Command IDs and command events. */
+enum wmi_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+
+ /* Scan specific commands */
+ WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+
+ /* PDEV (physical device) specific commands */
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+
+ /* VDEV (virtual device) specific commands */
+ WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+ WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+
+ /* commands to directly control BA negotiation directly from host. */
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+
+ /** DFS-specific commands */
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+
+ /* Roaming specific commands */
+ WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+
+ /* offload scan specific commands */
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+
+ /* AP power save specific config */
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_PEER_RATE_RETRY_SCHED_CMDID =
+ WMI_CMD_GRP(WMI_GRP_RATE_CTRL),
+
+ /* WLAN Profiling commands. */
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+ /* Suspend resume command Ids */
+ WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+
+ /* WOW Specific WMI commands*/
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+
+ /* spectral scan commands */
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+
+ /* F/W stats */
+ WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS),
+
+ /* ARP OFFLOAD REQUEST*/
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL),
+
+ /* Network list offload config */
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL),
+
+ /* GTK offload Specific WMI commands*/
+ WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL),
+
+ /* CSA offload Specific WMI commands*/
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+
+ /* Chatter commands*/
+ WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER),
+
+ /* addba specific commands */
+ WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+
+ /* set station mimo powersave method */
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ /* Configure the Station UAPSD AC Auto Trigger Parameters */
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+
+ /* STA keep alive parameter configuration,
+  * requires WMI_SERVICE_STA_KEEP_ALIVE */
+ WMI_STA_KEEPALIVE_CMD,
+
+ /* misc command group */
+ WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+
+ /* GPIO Configuration */
+ WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+};
+
+enum wmi_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+
+ /* Scan specific events */
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+
+ /* PDEV specific events */
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+
+ /* VDEV specific events */
+ WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+
+ /* peer specific events */
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),
+
+ /* beacon/mgmt specific events */
+ WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+
+ /* ADDBA Related WMI Events*/
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+
+ /* WoW */
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW),
+
+ /* RTT */
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+
+ /* GTK offload */
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+
+ /* CSA IE received event */
+ WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL),
+
+ /* Misc events */
+ WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+
+ /* GPIO Event */
+ WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
+};
+
+enum wmi_phy_mode {
+ MODE_11A = 0, /* 11a Mode */
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4, /* 11a HT20 mode */
+ MODE_11NG_HT20 = 5, /* 11g HT20 mode */
+ MODE_11NA_HT40 = 6, /* 11a HT40 mode */
+ MODE_11NG_HT40 = 7, /* 11g HT40 mode */
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ /* MODE_11AC_VHT160 = 11, */
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_UNKNOWN = 14,
+ MODE_MAX = 14
+};
+
+#define WMI_CHAN_LIST_TAG 0x1
+#define WMI_SSID_LIST_TAG 0x2
+#define WMI_BSSID_LIST_TAG 0x3
+#define WMI_IE_TAG 0x4
+
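+/*
+ * note: the 6-bit phy mode shares storage with the low byte of flags;
+ * the WMI_CHAN_FLAG_* bits therefore start at bit 6
+ */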
+struct wmi_channel {
+ __le32 mhz;
+ __le32 band_center_freq1;
+ __le32 band_center_freq2; /* valid for 11ac, 80plus80 */
+ union {
+ __le32 flags; /* WMI_CHAN_FLAG_ */
+ struct {
+ u8 mode; /* only 6 LSBs */
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo0;
+ struct {
+ u8 min_power;
+ u8 max_power;
+ u8 reg_power;
+ u8 reg_classid;
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo1;
+ struct {
+ u8 antenna_max;
+ } __packed;
+ } __packed;
+} __packed;
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ /* note: power unit is 1/4th of dBm */
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ u32 reg_class_id;
+ enum wmi_phy_mode mode;
+};
+
+enum wmi_channel_change_cause {
+ WMI_CHANNEL_CHANGE_CAUSE_NONE = 0,
+ WMI_CHANNEL_CHANGE_CAUSE_CSA,
+};
+
+#define WMI_CHAN_FLAG_HT40_PLUS (1 << 6)
+#define WMI_CHAN_FLAG_PASSIVE (1 << 7)
+#define WMI_CHAN_FLAG_ADHOC_ALLOWED (1 << 8)
+#define WMI_CHAN_FLAG_AP_DISABLED (1 << 9)
+#define WMI_CHAN_FLAG_DFS (1 << 10)
+#define WMI_CHAN_FLAG_ALLOW_HT (1 << 11)
+#define WMI_CHAN_FLAG_ALLOW_VHT (1 << 12)
+
+/* Indicate reason for channel switch */
+#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
+
+#define WMI_MAX_SPATIAL_STREAM 3
+
+/* HT Capabilities*/
+#define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */
+#define WMI_HT_CAP_HT20_SGI 0x0002 /* Short Guard Interval with HT20 */
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004 /* Dynamic MIMO powersave */
+#define WMI_HT_CAP_TX_STBC 0x0008 /* B3 TX STBC */
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030 /* B4-B5 RX STBC */
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040 /* LDPC supported */
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080 /* L-SIG TXOP Protection */
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+
+/*
+ * The WMI_VHT_CAP_* masks map to the IEEE 802.11ac VHT capability
+ * information field. Fields not defined here are either unsupported or
+ * reserved. Do not change these masks; if a new one has to be added,
+ * follow the bit layout specified by the 802.11ac draft.
+ */
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+/* The following also refer for max HT AMSDU */
+#define WMI_VHT_CAP_MAX_MPDU_LEN_3839 0x00000000
+#define WMI_VHT_CAP_MAX_MPDU_LEN_7935 0x00000001
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/*
+ * Interested readers refer to Rx/Tx MCS Map definition as defined in
+ * 802.11ac
+ */
+#define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss) ((3 & (r)) << (((ss) - 1) << 1))
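+/* e.g. WMI_VHT_MAX_MCS_4_SS_MASK(2, 3) == (2 << 4) == 0x20 */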
+#define WMI_VHT_MAX_SUPP_RATE_MASK 0x1fff0000
+#define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT 16
+
+enum {
+ REGDMN_MODE_11A = 0x00001, /* 11a channels */
+ REGDMN_MODE_TURBO = 0x00002, /* 11a turbo-only channels */
+ REGDMN_MODE_11B = 0x00004, /* 11b channels */
+ REGDMN_MODE_PUREG = 0x00008, /* 11g channels (OFDM only) */
+ REGDMN_MODE_11G = 0x00008, /* XXX historical */
+ REGDMN_MODE_108G = 0x00020, /* 11a+Turbo channels */
+ REGDMN_MODE_108A = 0x00040, /* 11g+Turbo channels */
+ REGDMN_MODE_XR = 0x00100, /* XR channels */
+ REGDMN_MODE_11A_HALF_RATE = 0x00200, /* 11A half rate channels */
+ REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */
+ REGDMN_MODE_11NG_HT20 = 0x00800, /* 11N-G HT20 channels */
+ REGDMN_MODE_11NA_HT20 = 0x01000, /* 11N-A HT20 channels */
+ REGDMN_MODE_11NG_HT40PLUS = 0x02000, /* 11N-G HT40 + channels */
+ REGDMN_MODE_11NG_HT40MINUS = 0x04000, /* 11N-G HT40 - channels */
+ REGDMN_MODE_11NA_HT40PLUS = 0x08000, /* 11N-A HT40 + channels */
+ REGDMN_MODE_11NA_HT40MINUS = 0x10000, /* 11N-A HT40 - channels */
+ REGDMN_MODE_11AC_VHT20 = 0x20000, /* 5Ghz, VHT20 */
+ REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */
+ REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */
+ REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */
+ REGDMN_MODE_ALL = 0xffffffff
+};
+
+#define REGDMN_CAP1_CHAN_HALF_RATE 0x00000001
+#define REGDMN_CAP1_CHAN_QUARTER_RATE 0x00000002
+#define REGDMN_CAP1_CHAN_HAL49GHZ 0x00000004
+
+/* regulatory capabilities */
+#define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2 0x0100
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
+
+struct hal_reg_capabilities {
+ /* regdomain value specified in EEPROM */
+ __le32 eeprom_rd;
+ /*regdomain */
+ __le32 eeprom_rd_ext;
+ /* CAP1 capabilities bit map. */
+ __le32 regcap1;
+ /* REGDMN EEPROM CAP. */
+ __le32 regcap2;
+ /* REGDMN MODE */
+ __le32 wireless_modes;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
+} __packed;
+
+enum wlan_mode_capability {
+ WHAL_WLAN_11A_CAPABILITY = 0x1,
+ WHAL_WLAN_11G_CAPABILITY = 0x2,
+ WHAL_WLAN_11AG_CAPABILITY = 0x3,
+};
+
+/* structure used by FW for requesting host memory */
+struct wlan_host_mem_req {
+ /* ID of the request */
+ __le32 req_id;
+ /* size of the of each unit */
+ __le32 unit_size;
+ /* flags to indicate that the number of units is dependent
+  * on the number of resources (num vdevs, num peers, etc.)
+ */
+ __le32 num_unit_info;
+ /*
+ * actual number of units to allocate. If the flags in num_unit_info
+ * indicate that the number of units is tied to the number of a
+ * particular resource, then num_units is set to 0 and the host
+ * derives the number of units from the number of resources it is
+ * requesting.
+ */
+ __le32 num_units;
+} __packed;
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
+ ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+ (1 << ((svc_id)%(sizeof(u32))))) != 0)
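+
+/*
+ * usage sketch (svc_map names an illustrative, decoded copy of the
+ * wmi_service_bitmap from the service ready event):
+ *   if (WMI_SERVICE_IS_ENABLED(svc_map, WMI_SERVICE_11AC))
+ *           ...
+ */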
+
+/*
+ * The following struct holds optional payload for
+ * wmi_service_ready_event; e.g., for 11ac the target passes some of the
+ * device capabilities to the host.
+ */
+struct wmi_service_ready_event {
+ __le32 sw_version;
+ __le32 sw_version_1;
+ __le32 abi_version;
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+ __le32 num_rf_chains;
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ struct hal_reg_capabilities hal_reg_capabilities;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+ /*
+ * Max beacon and Probe Response IE offload size
+ * (includes optional P2P IEs)
+ */
+ __le32 max_bcn_ie_size;
+ /*
+ * request to host to allocate a chunk of memory and pass it down to FW
+ * via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low latency interfaces like PCIE
+ * where FW can access this memory directly (or) by DMA.
+ */
+ __le32 num_mem_reqs;
+ struct wlan_host_mem_req mem_reqs[1];
+} __packed;
+
+/*
+ * status consists of the lower 16 bits of init status and the upper 16 bits
+ * of the module ID that returned the status
+ */
+#define WLAN_INIT_STATUS_SUCCESS 0x0
+#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
+#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
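+
+/*
+ * Worked example (illustrative only): for a status word of 0x00030001,
+ * WLAN_GET_INIT_STATUS_REASON() yields 0x0001 (lower 16 bits) and
+ * WLAN_GET_INIT_STATUS_MODULE_ID() yields 0x0003 (upper 16 bits), i.e.
+ * module 3 reported reason code 1. A reason of 0x0 corresponds to
+ * WLAN_INIT_STATUS_SUCCESS.
+ */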
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ struct wmi_mac_addr mac_addr;
+ __le32 status;
+} __packed;
+
+struct wmi_resource_config {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /*
+ * In offload mode target supports features like WOW, chatter and
+ * other protocol offloads. In order to support them some
+ * functionalities like reorder buffering, PN checking need to be
+ * done in target. This determines the maximum number of peers supported
+ * by the target in offload mode.
+ */
+ __le32 num_offload_peers;
+
+ /* For target-based RX reordering */
+ __le32 num_offload_reorder_bufs;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types
+ * This setting also determines the default TX behavior; however, TX
+ * behavior can be modified on a per VAP basis during VAP init
+ */
+ __le32 rx_decap_mode;
+
+ /* maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum number of AP profiles to be pushed for offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size, e.g. for a PCI target the limit can be
+ * 0: default, 1: 256B
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW :
+ * No of Video Nodes to be supported
+ * and Max no of descriptors for each Video link (node).
+ */
+ __le32 vow_config;
+
+ /* maximum VDEV that could use GTK offload */
+ __le32 gtk_offload_max_vdev;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
+
+/* structure describing host memory chunk. */
+struct host_memory_chunk {
+ /* id of the request that is passed up in service ready */
+ __le32 req_id;
+ /* the physical address of the memory chunk */
+ __le32 ptr;
+ /* size of the chunk */
+ __le32 size;
+} __packed;
+
+struct wmi_init_cmd {
+ struct wmi_resource_config resource_config;
+ __le32 num_host_mem_chunks;
+
+ /*
+ * variable number of host memory chunks.
+ * This should be the last element in the structure
+ */
+ struct host_memory_chunk host_mem_chunks[1];
+} __packed;
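+
+/*
+ * Sizing sketch (illustrative only, not the driver's actual code): since
+ * host_mem_chunks[] is a variable-length trailer that already reserves one
+ * element, a host building this command might size the buffer roughly as
+ * (n_chunks is a hypothetical local matching num_host_mem_chunks, which in
+ * turn echoes the mem_reqs advertised in wmi_service_ready_event):
+ *
+ *	len = sizeof(struct wmi_init_cmd);
+ *	if (n_chunks > 1)
+ *		len += (n_chunks - 1) * sizeof(struct host_memory_chunk);
+ */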
+
+/* TLV for channel list */
+struct wmi_chan_list {
+ __le32 tag; /* WMI_CHAN_LIST_TAG */
+ __le32 num_chan;
+ __le32 channel_list[0];
+} __packed;
+
+struct wmi_bssid_list {
+ __le32 tag; /* WMI_BSSID_LIST_TAG */
+ __le32 num_bssid;
+ struct wmi_mac_addr bssid_list[0];
+} __packed;
+
+struct wmi_ie_data {
+ __le32 tag; /* WMI_IE_TAG */
+ __le32 ie_len;
+ u8 ie_data[0];
+} __packed;
+
+struct wmi_ssid {
+ __le32 ssid_len;
+ u8 ssid[32];
+} __packed;
+
+struct wmi_ssid_list {
+ __le32 tag; /* WMI_SSID_LIST_TAG */
+ __le32 num_ssids;
+ struct wmi_ssid ssids[0];
+} __packed;
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+struct wmi_start_scan_cmd {
+ /* Scan ID */
+ __le32 scan_id;
+ /* Scan requestor ID */
+ __le32 scan_req_id;
+ /* VDEV id(interface) that is requesting scan */
+ __le32 vdev_id;
+ /* Scan Priority, input to scan scheduler */
+ __le32 scan_priority;
+ /* Scan events subscription */
+ __le32 notify_scan_events;
+ /* dwell time in msec on active channels */
+ __le32 dwell_time_active;
+ /* dwell time in msec on passive channels */
+ __le32 dwell_time_passive;
+ /*
+ * min time in msec on the BSS channel, only valid if at least one
+ * VDEV is active
+ */
+ __le32 min_rest_time;
+ /*
+ * max rest time in msec on the BSS channel, only valid if at least
+ * one VDEV is active
+ */
+ /*
+ * the scanner will rest on the bss channel for at least min_rest_time.
+ * After min_rest_time the scanner will start checking for tx/rx
+ * activity on all VDEVs. If there is no activity the scanner will
+ * switch to off channel. If there is activity the scanner will let
+ * the radio stay on the bss channel until max_rest_time expires. At
+ * max_rest_time the scanner will switch to off channel irrespective of
+ * activity. Activity is determined by the idle_time parameter.
+ */
+ __le32 max_rest_time;
+ /*
+ * time before sending next set of probe requests.
+ * The scanner keeps repeating probe requests transmission with
+ * period specified by repeat_probe_time.
+ * The number of probe requests specified depends on the ssid_list
+ * and bssid_list
+ */
+ __le32 repeat_probe_time;
+ /* time in msec between 2 consecutive probe requests within a set. */
+ __le32 probe_spacing_time;
+ /*
+ * data inactivity time in msec on bss channel that will be used by
+ * scanner for measuring the inactivity.
+ */
+ __le32 idle_time;
+ /* maximum time in msec allowed for scan */
+ __le32 max_scan_time;
+ /*
+ * delay in msec before sending first probe request after switching
+ * to a channel
+ */
+ __le32 probe_delay;
+ /* Scan control flags */
+ __le32 scan_ctrl_flags;
+
+ /* Burst duration time in msecs */
+ __le32 burst_duration;
+ /*
+ * TLV (tag length value) parameters follow the scan_cmd structure.
+ * TLVs can contain a channel list, bssid list, ssid list and
+ * IEs. The TLV tags are defined above; see the layout sketch below.
+ */
+} __packed;
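+
+/*
+ * Illustrative TLV layout following wmi_start_scan_cmd (a sketch, not a
+ * normative definition): the optional lists are appended back to back after
+ * the fixed command, each tagged with one of the *_TAG values noted above,
+ * e.g.:
+ *
+ *	[wmi_start_scan_cmd]
+ *	[wmi_chan_list  { tag = WMI_CHAN_LIST_TAG,  num_chan,  channel_list[] }]
+ *	[wmi_ssid_list  { tag = WMI_SSID_LIST_TAG,  num_ssids, ssids[] }]
+ *	[wmi_bssid_list { tag = WMI_BSSID_LIST_TAG, num_bssid, bssid_list[] }]
+ *	[wmi_ie_data    { tag = WMI_IE_TAG,         ie_len,    ie_data[] }]
+ */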
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u32 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+/* scan control flags */
+
+/* passively scan all channels including active channels */
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+/* add wild card ssid probe request even though ssid_list is specified. */
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+/* add cck rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+/* add ofdm rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+/* To enable indication of Chan load and Noise floor to host */
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+/* Filter Probe request frames */
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+/* When set, DFS channels will not be scanned */
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+/* Different FW scan engines may choose to bail out on errors.
+ * Allow the driver to have influence over that. */
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+
+/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
+#define WMI_SCAN_CLASS_MASK 0xFF000000
+
+
+enum wmi_stop_scan_type {
+ WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */
+ WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */
+ WMI_SCAN_STOP_ALL = 0x04000000, /* stop all scans */
+};
+
+struct wmi_stop_scan_cmd {
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 req_type;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_stop_scan_arg {
+ u32 req_id;
+ enum wmi_stop_scan_type req_type;
+ union {
+ u32 scan_id;
+ u32 vdev_id;
+ } u;
+};
+
+struct wmi_scan_chan_list_cmd {
+ __le32 num_scan_chans;
+ struct wmi_channel chan_info[0];
+} __packed;
+
+struct wmi_scan_chan_list_arg {
+ u32 n_channels;
+ struct wmi_channel_arg *channels;
+};
+
+enum wmi_bss_filter {
+ WMI_BSS_FILTER_NONE = 0, /* no beacons forwarded */
+ WMI_BSS_FILTER_ALL, /* all beacons forwarded */
+ WMI_BSS_FILTER_PROFILE, /* only beacons matching profile */
+ WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */
+ WMI_BSS_FILTER_CURRENT_BSS, /* only beacons matching current BSS */
+ WMI_BSS_FILTER_ALL_BUT_BSS, /* all but beacons matching BSS */
+ WMI_BSS_FILTER_PROBED_SSID, /* beacons matching probed ssid */
+ WMI_BSS_FILTER_LAST_BSS, /* marker only */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = 0x1,
+ WMI_SCAN_EVENT_COMPLETED = 0x2,
+ WMI_SCAN_EVENT_BSS_CHANNEL = 0x4,
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
+ WMI_SCAN_EVENT_DEQUEUED = 0x10,
+ WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_START_FAILED = 0x40,
+ WMI_SCAN_EVENT_RESTARTED = 0x80,
+ WMI_SCAN_EVENT_MAX = 0x8000
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_scan_event {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * This defines how much headroom is kept in the
+ * receive frame between the descriptor and the
+ * payload, in order for the WMI PHY error and
+ * management handler to insert header contents.
+ *
+ * This is in bytes.
+ */
+#define WMI_MGMT_RX_HDR_HEADROOM 52
+
+/*
+ * This event will be used for sending scan results
+ * as well as rx mgmt frames to the host. The rx buffer
+ * will be sent as part of this WMI event. It would be a
+ * good idea to pass all the fields in the RX status
+ * descriptor up to the host.
+ */
+struct wmi_mgmt_rx_hdr {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status; /* %WMI_RX_STATUS_ */
+} __packed;
+
+struct wmi_mgmt_rx_event {
+ struct wmi_mgmt_rx_hdr hdr;
+ u8 buf[0];
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+struct wmi_single_phyerr_rx_hdr {
+ /* TSF timestamp */
+ __le32 tsf_timestamp;
+
+ /*
+ * Current freq1, freq2
+ *
+ * [7:0]: freq1[lo]
+ * [15:8] : freq1[hi]
+ * [23:16]: freq2[lo]
+ * [31:24]: freq2[hi]
+ */
+ __le16 freq1;
+ __le16 freq2;
+
+ /*
+ * Combined RSSI over all chains and channel width for this PHY error
+ *
+ * [7:0]: RSSI combined
+ * [15:8]: Channel width (MHz)
+ * [23:16]: PHY error code
+ * [31:24]: reserved (future use)
+ */
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u8 rsvd0;
+
+ /*
+ * RSSI on chain 0 through 3
+ *
+ * This is formatted the same as the PPDU_START RX descriptor
+ * field:
+ *
+ * [7:0]: pri20
+ * [15:8]: sec20
+ * [23:16]: sec40
+ * [31:24]: sec80
+ */
+
+ __le32 rssi_chain0;
+ __le32 rssi_chain1;
+ __le32 rssi_chain2;
+ __le32 rssi_chain3;
+
+ /*
+ * Last calibrated NF value for chain 0 through 3
+ *
+ * nf_list_1:
+ *
+ * + [15:0] - chain 0
+ * + [31:16] - chain 1
+ *
+ * nf_list_2:
+ *
+ * + [15:0] - chain 2
+ * + [31:16] - chain 3
+ */
+ __le32 nf_list_1;
+ __le32 nf_list_2;
+
+
+ /* Length of the frame */
+ __le32 buf_len;
+} __packed;
+
+struct wmi_single_phyerr_rx_event {
+ /* Phy error event header */
+ struct wmi_single_phyerr_rx_hdr hdr;
+ /* frame buffer */
+ u8 bufp[0];
+} __packed;
+
+struct wmi_comb_phyerr_rx_hdr {
+ /* Number of phy error events */
+ __le32 num_phyerr_events;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+} __packed;
+
+struct wmi_comb_phyerr_rx_event {
+ /* Phy error event header */
+ struct wmi_comb_phyerr_rx_hdr hdr;
+ /*
+ * frame buffer - contains multiple payloads in the order:
+ * header - payload, header - payload...
+ * (The header is of type: wmi_single_phyerr_rx_hdr)
+ */
+ u8 bufp[0];
+} __packed;
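+
+/*
+ * Parsing sketch (illustrative only; handle_phyerr() is a hypothetical
+ * per-error handler, bounds checking omitted): the combined event carries
+ * hdr.num_phyerr_events back-to-back (header, payload) pairs, so a receiver
+ * might walk bufp roughly like this:
+ *
+ *	u8 *p = ev->bufp;
+ *	u32 i, n = __le32_to_cpu(ev->hdr.num_phyerr_events);
+ *
+ *	for (i = 0; i < n; i++) {
+ *		struct wmi_single_phyerr_rx_hdr *h = (void *)p;
+ *
+ *		handle_phyerr(h);
+ *		p += sizeof(*h) + __le32_to_cpu(h->buf_len);
+ *	}
+ */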
+
+struct wmi_mgmt_tx_hdr {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_mgmt_tx_cmd {
+ struct wmi_mgmt_tx_hdr hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_echo_event {
+ __le32 value;
+} __packed;
+
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+
+struct wmi_pdev_set_regdomain_cmd {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_pdev_set_quiet_cmd {
+ /* period in TUs */
+ __le32 period;
+
+ /* duration in TUs */
+ __le32 duration;
+
+ /* offset in TUs */
+ __le32 next_start;
+
+ /* enable/disable */
+ __le32 enabled;
+} __packed;
+
+
+/*
+ * 802.11g protection mode.
+ */
+enum ath10k_protmode {
+ ATH10K_PROT_NONE = 0, /* no protection */
+ ATH10K_PROT_CTSONLY = 1, /* CTS to self */
+ ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
+};
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_csa_event_ies_present_flag {
+ WMI_CSA_IE_PRESENT = 0x00000001,
+ WMI_XCSA_IE_PRESENT = 0x00000002,
+ WMI_WBW_IE_PRESENT = 0x00000004,
+ WMI_CSWARP_IE_PRESENT = 0x00000008,
+};
+
+/* wmi CSA receive event from beacon frame */
+struct wmi_csa_event {
+ __le32 i_fc_dur;
+ /* Bit 0-15: FC */
+ /* Bit 16-31: DUR */
+ struct wmi_mac_addr i_addr1;
+ struct wmi_mac_addr i_addr2;
+ __le32 csa_ie[2];
+ __le32 xcsa_ie[2];
+ __le32 wb_ie[2];
+ __le32 cswarp_ie;
+ __le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */
+} __packed;
+
+/* the definition of different PDEV parameters */
+#define PDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
+
+enum wmi_pdev_param {
+ /* TX chain mask */
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon generation mode. 0: staggered, 1: bursted */
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off-channel mode.
+ * 0: turn off off-channel mode, 1: turn on off-channel mode
+ */
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
+ */
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ /* Dynamic bandwidth 0: disable 1: enable */
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0: disable */
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* aggregate sw retry threshold. 0: disable */
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0: disable */
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ /* RX buffering flush enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ /* RX buffering watermark */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ /* RX buffering timeout enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ /* RX buffering timeout value */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ /* pdev level stats update period in ms */
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP frames are sent */
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable proxy STA */
+ WMI_PDEV_PARAM_PROXY_STA,
+ /* Enable/Disable low power state when all VDEVs are inactive/idle. */
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ /* Enable/Disable power gating sleep */
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+};
+
+struct wmi_pdev_set_param_cmd {
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_pdev_get_tpc_config_cmd {
+ /* parameter */
+ __le32 param;
+} __packed;
+
+#define WMI_TPC_RATE_MAX 160
+#define WMI_TPC_TX_N_CHAIN 4
+
+enum wmi_tpc_config_event_flag {
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC = 0x2,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF = 0x4,
+};
+
+struct wmi_pdev_tpc_config_event {
+ __le32 reg_domain;
+ __le32 chan_freq;
+ __le32 phy_mode;
+ __le32 twice_antenna_reduction;
+ __le32 twice_max_rd_power;
+ s32 twice_antenna_gain;
+ __le32 power_limit;
+ __le32 rate_max;
+ __le32 num_tx_chain;
+ __le32 ctl;
+ __le32 flags;
+ s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ u8 rates_array[WMI_TPC_RATE_MAX];
+} __packed;
+
+/* Transmit power scale factor. */
+enum wmi_tp_scale {
+ WMI_TP_SCALE_MAX = 0, /* no scaling (default) */
+ WMI_TP_SCALE_50 = 1, /* 50% of max (-3 dBm) */
+ WMI_TP_SCALE_25 = 2, /* 25% of max (-6 dBm) */
+ WMI_TP_SCALE_12 = 3, /* 12% of max (-9 dBm) */
+ WMI_TP_SCALE_MIN = 4, /* min, but still on */
+ WMI_TP_SCALE_SIZE = 5, /* max num of enum */
+};
+
+struct wmi_set_channel_cmd {
+ /* channel (only frequency and mode info are used) */
+ struct wmi_channel chan;
+} __packed;
+
+struct wmi_pdev_chanlist_update_event {
+ /* number of channels */
+ __le32 num_chan;
+ /* array of channels */
+ struct wmi_channel channel_list[1];
+} __packed;
+
+#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
+
+struct wmi_debug_mesg_event {
+ /* message buffer, NULL terminated */
+ char bufp[WMI_MAX_DEBUG_MESG];
+} __packed;
+
+enum {
+ /* P2P device */
+ VDEV_SUBTYPE_P2PDEV = 0,
+ /* P2P client */
+ VDEV_SUBTYPE_P2PCLI,
+ /* P2P GO */
+ VDEV_SUBTYPE_P2PGO,
+ /* BT3.0 HS */
+ VDEV_SUBTYPE_BT,
+};
+
+struct wmi_pdev_set_channel_cmd {
+ /* ignore power, only use flags, mode and freq */
+ struct wmi_channel chan;
+} __packed;
+
+/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
+#define WMI_DSCP_MAP_MAX (64)
+struct wmi_pdev_set_dscp_tid_map_cmd {
+ /* map indicating DSCP to TID conversion */
+ __le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX];
+} __packed;
+
+enum mcast_bcast_rate_id {
+ WMI_SET_MCAST_RATE,
+ WMI_SET_BCAST_RATE
+};
+
+struct mcast_bcast_rate {
+ enum mcast_bcast_rate_id rate_id;
+ __le32 rate;
+} __packed;
+
+struct wmi_wmm_params {
+ __le32 cwmin;
+ __le32 cwmax;
+ __le32 aifs;
+ __le32 txop;
+ __le32 acm;
+ __le32 no_ack;
+} __packed;
+
+struct wmi_pdev_set_wmm_params {
+ struct wmi_wmm_params ac_be;
+ struct wmi_wmm_params ac_bk;
+ struct wmi_wmm_params ac_vi;
+ struct wmi_wmm_params ac_vo;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txop;
+ u32 acm;
+ u32 no_ack;
+};
+
+struct wmi_pdev_set_wmm_params_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+struct wal_dbg_tx_stats {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDUs queued to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requed;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+} __packed;
+
+struct wal_dbg_rx_stats {
+ /* Counts any change in ring routing mid-ppdu */
+ __le32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ __le32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ __le32 r0_frags;
+ __le32 r1_frags;
+ __le32 r2_frags;
+ __le32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ __le32 htt_msdus;
+ __le32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ __le32 loc_msdus;
+ __le32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ __le32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ __le32 phy_errs;
+
+ /* Number of PHY errors drops */
+ __le32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ __le32 mpdu_errs;
+} __packed;
+
+struct wal_dbg_peer_stats {
+ /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+ __le32 dummy;
+} __packed;
+
+struct wal_dbg_stats {
+ struct wal_dbg_tx_stats tx;
+ struct wal_dbg_rx_stats rx;
+ struct wal_dbg_peer_stats peer;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_REQUEST_PEER_STAT = 0x01,
+ WMI_REQUEST_AP_STAT = 0x02
+};
+
+struct wmi_request_stats_cmd {
+ __le32 stats_id;
+
+ /*
+ * Space to add parameters like
+ * peer mac addr
+ */
+} __packed;
+
+/* Suspend option */
+enum {
+ /* suspend */
+ WMI_PDEV_SUSPEND,
+
+ /* suspend and disable all interrupts */
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_suspend_cmd {
+ /* suspend option sent to target */
+ __le32 suspend_opt;
+} __packed;
+
+struct wmi_stats_event {
+ __le32 stats_id; /* %WMI_REQUEST_ */
+ /*
+ * number of pdev stats event structures
+ * (wmi_pdev_stats) 0 or 1
+ */
+ __le32 num_pdev_stats;
+ /*
+ * number of vdev stats event structures
+ * (wmi_vdev_stats) 0 or max vdevs
+ */
+ __le32 num_vdev_stats;
+ /*
+ * number of peer stats event structures
+ * (wmi_peer_stats) 0 or max peers
+ */
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ /*
+ * followed by
+ * num_pdev_stats * sizeof(struct wmi_pdev_stats)
+ * num_vdev_stats * sizeof(struct wmi_vdev_stats)
+ * num_peer_stats * sizeof(struct wmi_peer_stats)
+ *
+ * By having a zero sized array, the pointer to data area
+ * becomes available without increasing the struct size
+ */
+ u8 data[0];
+} __packed;
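+
+/*
+ * Layout sketch (illustrative only): after the fixed header, data[] is
+ * expected to carry num_pdev_stats wmi_pdev_stats entries, then
+ * num_vdev_stats wmi_vdev_stats entries, then num_peer_stats
+ * wmi_peer_stats entries, e.g.:
+ *
+ *	const struct wmi_pdev_stats *pdev;
+ *	const struct wmi_vdev_stats *vdev;
+ *	const struct wmi_peer_stats *peer;
+ *	const u8 *p = ev->data;
+ *
+ *	pdev = (const struct wmi_pdev_stats *)p;
+ *	p += __le32_to_cpu(ev->num_pdev_stats) * sizeof(struct wmi_pdev_stats);
+ *	vdev = (const struct wmi_vdev_stats *)p;
+ *	p += __le32_to_cpu(ev->num_vdev_stats) * sizeof(struct wmi_vdev_stats);
+ *	peer = (const struct wmi_peer_stats *)p;
+ */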
+
+/*
+ * PDEV statistics
+ * TODO: add all PDEV stats here
+ */
+struct wmi_pdev_stats {
+ __le32 chan_nf; /* Channel noise floor */
+ __le32 tx_frame_count; /* TX frame count */
+ __le32 rx_frame_count; /* RX frame count */
+ __le32 rx_clear_count; /* rx clear count */
+ __le32 cycle_count; /* cycle count */
+ __le32 phy_err_count; /* Phy error count */
+ __le32 chan_tx_pwr; /* channel tx power */
+ struct wal_dbg_stats wal; /* WAL dbg stats */
+} __packed;
+
+/*
+ * VDEV statistics
+ * TODO: add all VDEV stats here
+ */
+struct wmi_vdev_stats {
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * peer statistics.
+ * TODO: add more stats
+ */
+struct wmi_peer_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_vdev_create_cmd {
+ __le32 vdev_id;
+ __le32 vdev_type;
+ __le32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+} __packed;
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE = 0,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
+ WMI_VDEV_SUBTYPE_P2P_GO = 3,
+};
+
+/* values for vdev_subtype */
+
+/* values for vdev_start_request flags */
+/*
+ * Indicates that AP VDEV uses hidden ssid. Only valid for
+ * AP/GO. */
+#define WMI_VDEV_START_HIDDEN_SSID (1<<0)
+/*
+ * Indicates if robust management frame/management frame
+ * protection is enabled. For GO/AP vdevs, it indicates that
+ * it may support station/client associations with RMF enabled.
+ * For STA/client vdevs, it indicates that sta will
+ * associate with AP with RMF enabled. */
+#define WMI_VDEV_START_PMF_ENABLED (1<<1)
+
+struct wmi_p2p_noa_descriptor {
+ __le32 type_count; /* 255: continuous schedule, 0: reserved */
+ __le32 duration; /* Absent period duration in micro seconds */
+ __le32 interval; /* Absent period interval in micro seconds */
+ __le32 start_time; /* 32 bit tsf time when it starts */
+} __packed;
+
+struct wmi_vdev_start_request_cmd {
+ /* WMI channel */
+ struct wmi_channel chan;
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* requestor id identifying the caller module */
+ __le32 requestor_id;
+ /* beacon interval from received beacon */
+ __le32 beacon_interval;
+ /* DTIM Period from the received beacon */
+ __le32 dtim_period;
+ /* Flags */
+ __le32 flags;
+ /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
+ struct wmi_ssid ssid;
+ /* beacon/probe response xmit rate. Applicable for SoftAP. */
+ __le32 bcn_tx_rate;
+ /* beacon/probe response xmit power. Applicable for SoftAP. */
+ __le32 bcn_tx_power;
+ /* number of p2p NOA descriptor(s) from scan entry */
+ __le32 num_noa_descriptors;
+ /*
+ * Disable H/W ack. This is used by WMI_VDEV_RESTART_REQUEST_CMDID.
+ * During CAC, our HW shouldn't ack directed frames.
+ */
+ __le32 disable_hw_ack;
+ /* actual p2p NOA descriptor from scan entry */
+ struct wmi_p2p_noa_descriptor noa_descriptors[2];
+} __packed;
+
+struct wmi_vdev_restart_request_cmd {
+ struct wmi_vdev_start_request_cmd vdev_start_request_cmd;
+} __packed;
+
+struct wmi_vdev_start_request_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+};
+
+struct wmi_vdev_delete_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ __le32 vdev_id;
+ __le32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_standby_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+#define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */
+
+struct wmi_key_seq_counter {
+ __le32 key_seq_counter_l;
+ __le32 key_seq_counter_h;
+} __packed;
+
+#define WMI_CIPHER_NONE 0x0 /* clear key */
+#define WMI_CIPHER_WEP 0x1
+#define WMI_CIPHER_TKIP 0x2
+#define WMI_CIPHER_AES_OCB 0x3
+#define WMI_CIPHER_AES_CCM 0x4
+#define WMI_CIPHER_WAPI 0x5
+#define WMI_CIPHER_CKIP 0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+
+struct wmi_vdev_install_key_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 key_idx;
+ __le32 key_flags;
+ __le32 key_cipher; /* %WMI_CIPHER_ */
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ __le32 key_len;
+ __le32 key_txmic_len;
+ __le32 key_rxmic_len;
+
+ /* contains key followed by tx mic followed by rx mic */
+ u8 key_data[0];
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ const void *key_data;
+};
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+};
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xff)
+
+/* the definition of different VDEV parameters */
+enum wmi_vdev_param {
+ /* RTS Threshold */
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* BMISS first time */
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ /* BMISS final time */
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ /* WMM enable/disable */
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum amount of time AP begins to consider STA inactive */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does ACK the data-null, or if the data-null
+ * is buffered and the STA does not retrieve it, the STA will be
+ * considered unresponsive
+ * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_STA_KICKOUT event to the host so the STA can be deleted. */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ /* Enable/Disable RTS-CTS */
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ /* Enable TX beamformee/beamformer (TXBF) */
+ WMI_VDEV_PARAM_TXBF,
+
+ /* Set packet power save */
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+
+ /*
+ * Drop unencrypted packets if received in an encrypted connection,
+ * otherwise forward them to the host.
+ */
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+
+ /*
+ * Set the encapsulation type for frames.
+ */
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+};
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_start_event_param {
+ WMI_VDEV_RESP_START_EVENT = 0,
+ WMI_VDEV_RESP_RESTART_EVENT,
+};
+
+struct wmi_vdev_start_response_event {
+ __le32 vdev_id;
+ __le32 req_id;
+ __le32 resp_type; /* %WMI_VDEV_RESP_ */
+ __le32 status;
+} __packed;
+
+struct wmi_vdev_standby_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * common structure used for simple events
+ * (stopped, resume_req, standby response)
+ */
+struct wmi_vdev_simple_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/* VDEV start response status codes */
+/* VDEV successfully started */
+#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
+
+/* requested VDEV not found */
+#define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID 0x1
+
+/* unsupported VDEV combination */
+#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2
+
+/* Beacon processing related command and event structures */
+struct wmi_bcn_tx_hdr {
+ __le32 vdev_id;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 bcn_len;
+} __packed;
+
+struct wmi_bcn_tx_cmd {
+ struct wmi_bcn_tx_hdr hdr;
+ u8 *bcn[0];
+} __packed;
+
+struct wmi_bcn_tx_arg {
+ u32 vdev_id;
+ u32 tx_rate;
+ u32 tx_power;
+ u32 bcn_len;
+ const void *bcn;
+};
+
+/* Beacon filter */
+#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */
+#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */
+#define WMI_BCN_FILTER_RSSI 2 /* Pass Beacons RSSI >= RSSI threshold */
+#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
+#define WMI_BCN_FILTER_SSID 4 /* Pass Beacons with matching SSID */
+
+struct wmi_bcn_filter_rx_cmd {
+ /* Filter ID */
+ __le32 bcn_filter_id;
+ /* Filter type - wmi_bcn_filter */
+ __le32 bcn_filter;
+ /* Buffer len */
+ __le32 bcn_filter_len;
+ /* Filter info (threshold, BSSID, RSSI) */
+ u8 *bcn_filter_buf;
+} __packed;
+
+/* Capabilities and IEs to be passed to firmware */
+struct wmi_bcn_prb_info {
+ /* Capabilities */
+ __le32 caps;
+ /* ERP info */
+ __le32 erp;
+ /* Advanced capabilities */
+ /* HT capabilities */
+ /* HT Info */
+ /* ibss_dfs */
+ /* wpa Info */
+ /* rsn Info */
+ /* rrm info */
+ /* ath_ext */
+ /* app IE */
+} __packed;
+
+struct wmi_bcn_tmpl_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* TIM IE offset from the beginning of the template. */
+ __le32 tim_ie_offset;
+ /* beacon probe capabilities and IEs */
+ struct wmi_bcn_prb_info bcn_prb_info;
+ /* beacon buffer length */
+ __le32 buf_len;
+ /* variable length data */
+ u8 data[1];
+} __packed;
+
+struct wmi_prb_tmpl_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* beacon probe capabilities and IEs */
+ struct wmi_bcn_prb_info bcn_prb_info;
+ /* beacon buffer length */
+ __le32 buf_len;
+ /* Variable length data */
+ u8 data[1];
+} __packed;
+
+enum wmi_sta_ps_mode {
+ /* disable power save for the given STA VDEV */
+ WMI_STA_PS_MODE_DISABLED = 0,
+ /* enable power save for a given STA VDEV */
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+struct wmi_sta_powersave_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /*
+ * Power save mode
+ * (see enum wmi_sta_ps_mode)
+ */
+ __le32 sta_ps_mode;
+} __packed;
+
+enum wmi_csa_offload_en {
+ WMI_CSA_OFFLOAD_DISABLE = 0,
+ WMI_CSA_OFFLOAD_ENABLE = 1,
+};
+
+struct wmi_csa_offload_enable_cmd {
+ __le32 vdev_id;
+ __le32 csa_offload_enable;
+} __packed;
+
+struct wmi_csa_offload_chanswitch_cmd {
+ __le32 vdev_id;
+ struct wmi_channel chan;
+} __packed;
+
+/*
+ * This parameter controls the policy for retrieving frames from AP while the
+ * STA is in sleep state.
+ *
+ * Only takes effect if the sta_ps_mode is enabled
+ */
+enum wmi_sta_ps_param_rx_wake_policy {
+ /*
+ * Wake up whenever there is RX activity on the VDEV. In this mode
+ * the power save SM (state machine) will come out of sleep by either
+ * sending a null frame (or) a data frame (with PS==0) in response to
+ * the TIM bit set in the received beacon frame from the AP.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+
+ /*
+ * Here the power save state machine will not wake up in response to the
+ * TIM bit, instead it will send a PS-Poll (or) UAPSD trigger based on the
+ * UAPSD configuration set up by the WMISET_PS_SET_UAPSD WMI command. When all
+ * access categories are delivery-enabled, the station will send a
+ * UAPSD trigger frame, otherwise it will send a PS-Poll.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/*
+ * Number of tx frames/beacon that cause the power save SM to wake up.
+ *
+ * Value 1 causes the SM to wake up for every TX. Value 0 has a special
+ * meaning: it will cause the SM to never wake up. This is useful if you want
+ * to keep the system asleep all the time for some kind of test mode. The host
+ * can change this parameter at any time. It will take effect at the next tx frame.
+ */
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /*
+ * Values greater than one indicate how many TX attempts per beacon
+ * interval occur before the STA will wake up
+ */
+};
+
+/*
+ * The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Poll as are necessary to retrieve buffered BU. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /*
+ * Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+};
+
+/*
+ * This will include the delivery and trigger enabled state for every AC.
+ * This is the negotiated state with the AP. The host MLME needs to set this
+ * based on the AP capability and the state set in the association request by
+ * the station MLME. The lower 8 bits of the value specify the UAPSD configuration.
+ */
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
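+
+/*
+ * Worked example (illustrative only): for ac = 1,
+ * WMI_UAPSD_AC_BIT_MASK(1, WMI_UAPSD_AC_TYPE_DELI) expands to (1 << 2),
+ * i.e. WMI_STA_PS_UAPSD_AC1_DELIVERY_EN, while
+ * WMI_UAPSD_AC_BIT_MASK(1, WMI_UAPSD_AC_TYPE_TRIG) expands to (1 << 3),
+ * i.e. WMI_STA_PS_UAPSD_AC1_TRIGGER_EN.
+ */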
+
+enum wmi_sta_powersave_param {
+ /*
+ * Controls how frames are retrieved from the AP while the STA is sleeping
+ *
+ * (see enum wmi_sta_ps_param_rx_wake_policy)
+ */
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+
+ /*
+ * The STA will go active after this many TX
+ *
+ * (see enum wmi_sta_ps_param_tx_wake_threshold)
+ */
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+
+ /*
+ * Number of PS-Poll to send before STA wakes up
+ *
+ * (see enum wmi_sta_ps_param_pspoll_count)
+ *
+ */
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+
+ /*
+ * TX/RX inactivity time in msec before going to sleep.
+ *
+ * The power save SM will monitor tx/rx activity on the VDEV; if there is
+ * no activity for the number of msec specified by the parameter, the
+ * power save SM will go to sleep.
+ */
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+
+ /*
+ * Set uapsd configuration.
+ *
+ * (see enum wmi_sta_ps_param_uapsd)
+ */
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+struct wmi_sta_powersave_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id; /* %WMI_STA_PS_PARAM_ */
+ __le32 param_value;
+} __packed;
+
+/* No MIMO power save */
+#define WMI_STA_MIMO_PS_MODE_DISABLE
+/* mimo powersave mode static */
+#define WMI_STA_MIMO_PS_MODE_STATIC
+/* mimo powersave mode dynamic */
+#define WMI_STA_MIMO_PS_MODE_DYNAMIC
+
+struct wmi_sta_mimo_ps_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* mimo powersave mode as defined above */
+ __le32 mimo_pwrsave_mode;
+} __packed;
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+/*
+ * AP power save parameter
+ * Set a power save specific parameter for a peer station
+ */
+enum wmi_ap_ps_peer_param {
+ /* Set uapsd configuration for a given peer.
+ *
+ * Include the delivery and trigger enabled state for every AC.
+ * The host MLME needs to set this based on AP capability and the station's
+ * request set in the association request received from the station.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /*
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period from wme ie in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /* Time in seconds for aging out buffered frames for STA in PS */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+};
+
+struct wmi_ap_ps_peer_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* AP powersave param (see enum wmi_ap_ps_peer_param) */
+ __le32 param_id;
+
+ /* AP powersave param value */
+ __le32 param_value;
+} __packed;
+
+/* 128 clients = 4 words */
+#define WMI_TIM_BITMAP_ARRAY_SIZE 4
+
+struct wmi_tim_info {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ __le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
+/* Maximum number of NOA Descriptors supported */
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+#define WMI_P2P_OPPPS_ENABLE_BIT BIT(0)
+#define WMI_P2P_OPPPS_CTWINDOW_OFFSET 1
+#define WMI_P2P_NOA_CHANGED_BIT BIT(0)
+
+struct wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ Bits 7-1 - Reserved */
+ u8 changed;
+ /* NOA index */
+ u8 index;
+ /* Bit 0 - Opp PS state of the AP
+ Bits 1-7 - Ctwindow in TUs */
+ u8 ctwindow_oppps;
+ /* Number of NOA descriptors */
+ u8 num_descriptors;
+
+ struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_bcn_info {
+ struct wmi_tim_info tim_info;
+ struct wmi_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_bcn_info bcn_info[1];
+} __packed;
+
+#define WMI_MAX_AP_VDEV 16
+
+struct wmi_tbtt_offset_event {
+ __le32 vdev_map;
+ __le32 tbttoffset_list[WMI_MAX_AP_VDEV];
+} __packed;
+
+
+struct wmi_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_fixed_rate {
+ /*
+ * rate mode. 0: disable fixed rate (auto rate)
+ * 1: legacy (non 11n) rate specified as ieee rate 2*Mbps
+ * 2: ht20 11n rate specified as mcs index
+ * 3: ht40 11n rate specified as mcs index
+ */
+ __le32 rate_mode;
+ /*
+ * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB)
+ * and series 3 is stored at byte 3 (MSB)
+ */
+ __le32 rate_series;
+ /*
+ * 4 retry counts for 4 rate series. retry count for rate 0 is stored
+ * in byte 0 (LSB) and retry count for rate 3 is stored at byte 3
+ * (MSB)
+ */
+ __le32 rate_retries;
+} __packed;
+
+struct wmi_peer_fixed_rate_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* fixed rate */
+ struct wmi_fixed_rate peer_fixed_rate;
+} __packed;
+
+#define WMI_MGMT_TID 17
+
+struct wmi_addba_clear_resp_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Buffer/Window size*/
+ __le32 buffersize;
+} __packed;
+
+struct wmi_delba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Is Initiator */
+ __le32 initiator;
+ /* Reason code */
+ __le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* status code */
+ __le32 statuscode;
+} __packed;
+
+struct wmi_send_singleamsdu_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+} __packed;
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_param {
+ WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+ WMI_PEER_AMPDU = 0x2,
+ WMI_PEER_AUTHORIZE = 0x3,
+ WMI_PEER_CHAN_WIDTH = 0x4,
+ WMI_PEER_NSS = 0x5,
+ WMI_PEER_USE_4ADDR = 0x6
+};
+
+struct wmi_peer_set_param_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+struct wmi_rate_set {
+ /* total number of rates */
+ __le32 num_rates;
+ /*
+ * rates (each 8bit value) packed into a 32 bit word.
+ * the rates are filled from least significant byte to most
+ * significant byte.
+ */
+ __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+} __packed;
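+
+/*
+ * Packing sketch (illustrative only): with the byte packing described above,
+ * rate index i lives in word rates[i / 4] at byte (i % 4), so e.g. the
+ * fifth rate (i = 4) occupies the least significant byte of rates[1]:
+ *
+ *	rates[i / 4] |= __cpu_to_le32((u32)rate << (8 * (i % 4)));
+ */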
+
+struct wmi_rate_set_arg {
+ unsigned int num_rates;
+ u8 rates[MAX_SUPPORTED_RATES];
+};
+
+/*
+ * NOTE: It would be a good idea to represent the Tx MCS
+ * info in one word and Rx in another word. This is split
+ * into multiple words for convenience
+ */
+struct wmi_vht_rate_set {
+ __le32 rx_max_rate; /* Max Rx data rate */
+ __le32 rx_mcs_set; /* Negotiated RX VHT rates */
+ __le32 tx_max_rate; /* Max Tx data rate */
+ __le32 tx_mcs_set; /* Negotiated TX VHT rates */
+} __packed;
+
+struct wmi_vht_rate_set_arg {
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+};
+
+struct wmi_peer_set_rates_cmd {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* legacy rate set */
+ struct wmi_rate_set peer_legacy_rates;
+ /* ht rate set */
+ struct wmi_rate_set peer_ht_rates;
+} __packed;
+
+struct wmi_peer_set_q_empty_callback_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ __le32 callback_enable;
+} __packed;
+
+#define WMI_PEER_AUTH 0x00000001
+#define WMI_PEER_QOS 0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_APSD 0x00000800
+#define WMI_PEER_HT 0x00001000
+#define WMI_PEER_40MHZ 0x00002000
+#define WMI_PEER_STBC 0x00008000
+#define WMI_PEER_LDPC 0x00010000
+#define WMI_PEER_DYN_MIMOPS 0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX 0x00200000
+#define WMI_PEER_VHT 0x02000000
+#define WMI_PEER_80MHZ 0x04000000
+#define WMI_PEER_PMF 0x08000000
+
+/*
+ * Peer rate capabilities.
+ *
+ * This is of interest to the ratecontrol
+ * module which resides in the firmware. The bit definitions are
+ * consistent with those defined in if_athrate.c.
+ */
+#define WMI_RC_DS_FLAG 0x01
+#define WMI_RC_CW40_FLAG 0x02
+#define WMI_RC_SGI_FLAG 0x04
+#define WMI_RC_HT_FLAG 0x08
+#define WMI_RC_RTSCTS_FLAG 0x10
+#define WMI_RC_TX_STBC_FLAG 0x20
+#define WMI_RC_RX_STBC_FLAG 0xC0
+#define WMI_RC_RX_STBC_FLAG_S 6
+#define WMI_RC_WEP_TKIP_FLAG 0x100
+#define WMI_RC_TS_FLAG 0x200
+#define WMI_RC_UAPSD_FLAG 0x400
+
+/* Maximum listen interval supported by hw in units of beacon interval */
+#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
+
+struct wmi_peer_assoc_complete_cmd {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 vdev_id;
+ __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
+ __le32 peer_associd; /* 16 LSBs */
+ __le32 peer_flags;
+ __le32 peer_caps; /* 16 LSBs */
+ __le32 peer_listen_intval;
+ __le32 peer_ht_caps;
+ __le32 peer_max_mpdu;
+ __le32 peer_mpdu_density; /* 0..16 */
+ __le32 peer_rate_caps;
+ struct wmi_rate_set peer_legacy_rates;
+ struct wmi_rate_set peer_ht_rates;
+ __le32 peer_nss; /* num of spatial streams */
+ __le32 peer_vht_caps;
+ __le32 peer_phymode;
+ struct wmi_vht_rate_set peer_vht_rates;
+ /* HT Operation Element of the peer. Five bytes packed into a
+ * two-element INT32 array, filled from LSB to MSB. */
+ __le32 peer_ht_info[2];
+} __packed;
+
+struct wmi_peer_assoc_complete_arg {
+ u8 addr[ETH_ALEN];
+ u32 vdev_id;
+ bool peer_reassoc;
+ u16 peer_aid;
+ u32 peer_flags; /* see %WMI_PEER_ */
+ u16 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density; /* 0..16 */
+ u32 peer_rate_caps; /* see %WMI_RC_ */
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 peer_num_spatial_streams;
+ u32 peer_vht_caps;
+ enum wmi_phy_mode peer_phymode;
+ struct wmi_vht_rate_set_arg peer_vht_rates;
+};
+
+struct wmi_peer_add_wds_entry_cmd {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* wds MAC addr */
+ struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_remove_wds_entry_cmd {
+ /* wds MAC addr */
+ struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_q_empty_callback_event {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+/*
+ * Channel info WMI event
+ */
+struct wmi_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+} __packed;
+
+/* Beacon filter wmi command info */
+#define BCN_FLT_MAX_SUPPORTED_IES 256
+#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
+
+struct bss_bcn_stats {
+ __le32 vdev_id;
+ __le32 bss_bcnsdropped;
+ __le32 bss_bcnsdelivered;
+} __packed;
+
+struct bcn_filter_stats {
+ __le32 bcns_dropped;
+ __le32 bcns_delivered;
+ __le32 activefilters;
+ struct bss_bcn_stats bss_stats;
+} __packed;
+
+struct wmi_add_bcn_filter_cmd {
+ u32 vdev_id;
+ u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST];
+} __packed;
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
+};
+
+/* note: ip4 addresses are in network byte order, i.e. big endian */
+struct wmi_sta_keepalive_arp_resp {
+ __be32 src_ip4_addr;
+ __be32 dest_ip4_addr;
+ struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ __le32 vdev_id;
+ __le32 enabled;
+ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 interval; /* in seconds */
+ struct wmi_sta_keepalive_arp_resp arp_resp;
+} __packed;
+
+#define ATH10K_RTS_MAX 2347
+#define ATH10K_FRAGMT_THRESHOLD_MIN 540
+#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
+
+#define WMI_MAX_EVENT 0x1000
+/* Maximum number of pending TXed WMI packets */
+#define WMI_MAX_PENDING_TX_COUNT 128
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+/* By default disable power save for IBSS */
+#define ATH10K_DEFAULT_ATIM 0
+
+struct ath10k;
+struct ath10k_vif;
+
+int ath10k_wmi_attach(struct ath10k *ar);
+void ath10k_wmi_detach(struct ath10k *ar);
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
+void ath10k_wmi_flush_tx(struct ath10k *ar);
+
+int ath10k_wmi_connect_htc_service(struct ath10k *ar);
+int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
+ const struct wmi_channel_arg *);
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
+int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+ u16 rd5g, u16 ctl2g, u16 ctl5g);
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
+ u32 value);
+int ath10k_wmi_cmd_init(struct ath10k *ar);
+int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
+int ath10k_wmi_stop_scan(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg);
+int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN]);
+int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id);
+int ath10k_wmi_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *);
+int ath10k_wmi_vdev_restart(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *);
+int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id);
+int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
+int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_param param_id, u32 param_value);
+int ath10k_wmi_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg);
+int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN]);
+int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN]);
+int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap);
+int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id, u32 param_value);
+int ath10k_wmi_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg);
+int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode);
+int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value);
+int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value);
+int ath10k_wmi_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg);
+int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+ const struct wmi_pdev_set_wmm_params_arg *arg);
+int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
+
+#endif /* _WMI_H_ */
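The LSB-first packing described in the wmi_rate_set comment above can be sketched as follows. This is illustrative only, not part of the patch; the helper name example_pack_rate_set is hypothetical and the sketch assumes the usual kernel cpu_to_le32() byte-order helper.

/* Sketch: pack 8-bit rate codes into the __le32 words of struct
 * wmi_rate_set, least significant byte first.
 */
static void example_pack_rate_set(struct wmi_rate_set *rs,
				  const u8 *rates, unsigned int num)
{
	unsigned int i;

	if (num > MAX_SUPPORTED_RATES)
		num = MAX_SUPPORTED_RATES;

	memset(rs->rates, 0, sizeof(rs->rates));
	rs->num_rates = cpu_to_le32(num);

	for (i = 0; i < num; i++)
		rs->rates[i / 4] |= cpu_to_le32((u32)rates[i] << (8 * (i % 4)));
}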
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8e8bcc7a4805..e9bc9e616b69 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -185,7 +185,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
err_free_hw:
ieee80211_free_hw(hw);
- platform_set_drvdata(pdev, NULL);
err_iounmap:
iounmap(mem);
err_out:
@@ -221,7 +220,6 @@ static int ath_ahb_remove(struct platform_device *pdev)
ath5k_deinit_ah(ah);
iounmap(ah->iobase);
- platform_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
return 0;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 7f702fe3ecc2..ce67ab791eae 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -60,6 +60,7 @@
#include <asm/unaligned.h>
+#include <net/mac80211.h>
#include "base.h"
#include "reg.h"
#include "debug.h"
@@ -666,9 +667,46 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
return htype;
}
+static struct ieee80211_rate *
+ath5k_get_rate(const struct ieee80211_hw *hw,
+ const struct ieee80211_tx_info *info,
+ struct ath5k_buf *bf, int idx)
+{
+ /*
+ * convert a ieee80211_tx_rate RC-table entry to
+ * the respective ieee80211_rate struct
+ */
+ if (bf->rates[idx].idx < 0) {
+ return NULL;
+ }
+
+ return &hw->wiphy->bands[info->band]->bitrates[bf->rates[idx].idx];
+}
+
+static u16
+ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
+ const struct ieee80211_tx_info *info,
+ struct ath5k_buf *bf, int idx)
+{
+ struct ieee80211_rate *rate;
+ u16 hw_rate;
+ u8 rc_flags;
+
+ rate = ath5k_get_rate(hw, info, bf, idx);
+ if (!rate)
+ return 0;
+
+ rc_flags = bf->rates[idx].flags;
+ hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
+ rate->hw_value_short : rate->hw_value;
+
+ return hw_rate;
+}
+
static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
- struct ath5k_txq *txq, int padsize)
+ struct ath5k_txq *txq, int padsize,
+ struct ieee80211_tx_control *control)
{
struct ath5k_desc *ds = bf->desc;
struct sk_buff *skb = bf->skb;
@@ -688,7 +726,11 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- rate = ieee80211_get_tx_rate(ah->hw, info);
+ ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
+ ARRAY_SIZE(bf->rates));
+
+ rate = ath5k_get_rate(ah->hw, info, bf, 0);
+
if (!rate) {
ret = -EINVAL;
goto err_unmap;
@@ -698,8 +740,8 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
flags |= AR5K_TXDESC_NOACK;
rc_flags = info->control.rates[0].flags;
- hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
- rate->hw_value_short : rate->hw_value;
+
+ hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0);
pktlen = skb->len;
@@ -722,12 +764,13 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
info->control.vif, pktlen, info));
}
+
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
ieee80211_get_hdrlen_from_skb(skb), padsize,
get_hw_packet_type(skb),
(ah->ah_txpower.txp_requested * 2),
hw_rate,
- info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
+ bf->rates[0].count, keyidx, ah->ah_tx_ant, flags,
cts_rate, duration);
if (ret)
goto err_unmap;
@@ -736,13 +779,15 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
if (ah->ah_capabilities.cap_has_mrr_support) {
memset(mrr_rate, 0, sizeof(mrr_rate));
memset(mrr_tries, 0, sizeof(mrr_tries));
+
for (i = 0; i < 3; i++) {
- rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
+
+ rate = ath5k_get_rate(ah->hw, info, bf, i);
if (!rate)
break;
- mrr_rate[i] = rate->hw_value;
- mrr_tries[i] = info->control.rates[i + 1].count;
+ mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i);
+ mrr_tries[i] = bf->rates[i].count;
}
ath5k_hw_setup_mrr_tx_desc(ah, ds,
@@ -1515,7 +1560,7 @@ unlock:
void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ath5k_txq *txq)
+ struct ath5k_txq *txq, struct ieee80211_tx_control *control)
{
struct ath5k_hw *ah = hw->priv;
struct ath5k_buf *bf;
@@ -1555,7 +1600,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
bf->skb = skb;
- if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
+ if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) {
bf->skb = NULL;
spin_lock_irqsave(&ah->txbuflock, flags);
list_add_tail(&bf->list, &ah->txbuf);
@@ -1571,11 +1616,13 @@ drop_packet:
static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
- struct ath5k_txq *txq, struct ath5k_tx_status *ts)
+ struct ath5k_txq *txq, struct ath5k_tx_status *ts,
+ struct ath5k_buf *bf)
{
struct ieee80211_tx_info *info;
u8 tries[3];
int i;
+ int size = 0;
ah->stats.tx_all_count++;
ah->stats.tx_bytes_count += skb->len;
@@ -1587,6 +1634,9 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
ieee80211_tx_info_clear_status(info);
+ size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+ memcpy(info->status.rates, bf->rates, size);
+
for (i = 0; i < ts->ts_final_idx; i++) {
struct ieee80211_tx_rate *r =
&info->status.rates[i];
@@ -1663,7 +1713,7 @@ ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
DMA_TO_DEVICE);
- ath5k_tx_frame_completed(ah, skb, txq, &ts);
+ ath5k_tx_frame_completed(ah, skb, txq, &ts, bf);
}
/*
@@ -1917,7 +1967,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
skb = ieee80211_get_buffered_bc(ah->hw, vif);
while (skb) {
- ath5k_tx_queue(ah->hw, skb, ah->cabq);
+ ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);
if (ah->cabq->txq_len >= ah->cabq->txq_max)
break;
@@ -2442,7 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SUPPORTS_RC_TABLE;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 6c94c7ff2350..ca9a83ceeee1 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -47,6 +47,7 @@ struct ath5k_hw;
struct ath5k_txq;
struct ieee80211_channel;
struct ath_bus_ops;
+struct ieee80211_tx_control;
enum nl80211_iftype;
enum ath5k_srev_type {
@@ -61,11 +62,12 @@ struct ath5k_srev_name {
};
struct ath5k_buf {
- struct list_head list;
- struct ath5k_desc *desc; /* virtual addr of desc */
- dma_addr_t daddr; /* physical addr of desc */
- struct sk_buff *skb; /* skbuff for buf */
- dma_addr_t skbaddr;/* physical addr of skb data */
+ struct list_head list;
+ struct ath5k_desc *desc; /* virtual addr of desc */
+ dma_addr_t daddr; /* physical addr of desc */
+ struct sk_buff *skb; /* skbuff for buf */
+ dma_addr_t skbaddr; /* physical addr of skb data */
+ struct ieee80211_tx_rate rates[4]; /* number of multi-rate stages */
};
struct ath5k_vif {
@@ -103,7 +105,7 @@ int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ath5k_txq *txq);
+ struct ath5k_txq *txq, struct ieee80211_tx_control *control);
const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 06f86f435711..40825d43322e 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -66,7 +66,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
return;
}
- ath5k_tx_queue(hw, skb, &ah->txqs[qnum]);
+ ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control);
}
@@ -325,7 +325,7 @@ ath5k_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr *ha;
mfilt[0] = 0;
- mfilt[1] = 1;
+ mfilt[1] = 0;
netdev_hw_addr_list_for_each(ha, mc_list) {
/* calculate XOR of eight 6-bit values */
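The ath5k changes above convert the driver to the mac80211 rate-table API (IEEE80211_HW_SUPPORTS_RC_TABLE). A minimal sketch of the pattern follows; the helper name example_fill_rates is hypothetical and not the driver's own function, only the mac80211 calls it uses are real.

#include <net/mac80211.h>

/* Sketch: fetch the per-frame rate table from mac80211 and map each
 * entry to a hardware rate code and retry count.
 */
static void example_fill_rates(struct ieee80211_hw *hw,
			       struct ieee80211_tx_control *control,
			       struct sk_buff *skb,
			       struct ieee80211_tx_rate *rates,
			       u16 *hw_rate, u8 *tries, int n_rates)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int i;

	/* ask mac80211 for the (possibly station-specific) rate table */
	ieee80211_get_tx_rates(info->control.vif,
			       control ? control->sta : NULL,
			       skb, rates, n_rates);

	for (i = 0; i < n_rates && rates[i].idx >= 0; i++) {
		struct ieee80211_rate *rate =
			&hw->wiphy->bands[info->band]->bitrates[rates[i].idx];

		hw_rate[i] = (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
			     rate->hw_value_short : rate->hw_value;
		tries[i] = rates[i].count;
	}
}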
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 5c9736a94e54..2437ad26949d 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3175,10 +3175,21 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
{
struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
struct ath6kl *ar = ath6kl_priv(vif->ndev);
- u32 id;
+ u32 id, freq;
const struct ieee80211_mgmt *mgmt;
bool more_data, queued;
+ /* default to the current channel, but use the one specified as argument
+ * if any
+ */
+ freq = vif->ch_hint;
+ if (chan)
+ freq = chan->center_freq;
+
+ /* never send freq zero to the firmware */
+ if (WARN_ON(freq == 0))
+ return -EINVAL;
+
mgmt = (const struct ieee80211_mgmt *) buf;
if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
ieee80211_is_probe_resp(mgmt->frame_control) &&
@@ -3188,8 +3199,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
* command to allow the target to fill in the generic IEs.
*/
*cookie = 0; /* TX status not supported */
- return ath6kl_send_go_probe_resp(vif, buf, len,
- chan->center_freq);
+ return ath6kl_send_go_probe_resp(vif, buf, len, freq);
}
id = vif->send_action_id++;
@@ -3205,17 +3215,14 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
/* AP mode Power saving processing */
if (vif->nw_type == AP_NETWORK) {
- queued = ath6kl_mgmt_powersave_ap(vif,
- id, chan->center_freq,
- wait, buf,
- len, &more_data, no_cck);
+ queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len,
+ &more_data, no_cck);
if (queued)
return 0;
}
- return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
- chan->center_freq, wait,
- buf, len, no_cck);
+ return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq,
+ wait, buf, len, no_cck);
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
@@ -3679,6 +3686,20 @@ err:
return NULL;
}
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support ath6kl_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE,
+ .n_patterns = WOW_MAX_FILTERS_PER_LIST,
+ .pattern_min_len = 1,
+ .pattern_max_len = WOW_PATTERN_SIZE,
+};
+#endif
+
int ath6kl_cfg80211_init(struct ath6kl *ar)
{
struct wiphy *wiphy = ar->wiphy;
@@ -3772,15 +3793,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
#ifdef CONFIG_PM
- wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
- WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_GTK_REKEY_FAILURE |
- WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
- WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_4WAY_HANDSHAKE;
- wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
- wiphy->wowlan.pattern_min_len = 1;
- wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+ wiphy->wowlan = &ath6kl_wowlan_support;
#endif
wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index fe38b836cb26..dbfd17d0a5fa 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -1240,20 +1240,14 @@ static ssize_t ath6kl_force_roam_write(struct file *file,
char buf[20];
size_t len;
u8 bssid[ETH_ALEN];
- int i;
- int addr[ETH_ALEN];
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
- if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
- &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
- != ETH_ALEN)
+ if (!mac_pton(buf, bssid))
return -EINVAL;
- for (i = 0; i < ETH_ALEN; i++)
- bssid[i] = addr[i];
ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
if (ret)
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 40ffee6184fd..6a67881f94d6 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1696,10 +1696,16 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
test_bit(WMI_READY,
&ar->flag),
WMI_TIMEOUT);
+ if (timeleft <= 0) {
+ clear_bit(WMI_READY, &ar->flag);
+ ath6kl_err("wmi is not ready or wait was interrupted: %ld\n",
+ timeleft);
+ ret = -EIO;
+ goto err_htc_stop;
+ }
ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
-
if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
ath6kl_info("%s %s fw %s api %d%s\n",
ar->hw.name,
@@ -1718,12 +1724,6 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
goto err_htc_stop;
}
- if (!timeleft || signal_pending(current)) {
- ath6kl_err("wmi is not ready or wait was interrupted\n");
- ret = -EIO;
- goto err_htc_stop;
- }
-
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
 /* communicate the wmi protocol version to the target */
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index fb141454c6d2..7126bdd4236c 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -345,17 +345,17 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
{
struct hif_scatter_req *s_req;
struct bus_request *bus_req;
- int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
+ int i, scat_req_sz, scat_list_sz, size;
u8 *virt_buf;
scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
scat_req_sz = sizeof(*s_req) + scat_list_sz;
if (!virt_scat)
- sg_sz = sizeof(struct scatterlist) * n_scat_entry;
+ size = sizeof(struct scatterlist) * n_scat_entry;
else
- buf_sz = 2 * L1_CACHE_BYTES +
- ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+ size = 2 * L1_CACHE_BYTES +
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
for (i = 0; i < n_scat_req; i++) {
/* allocate the scatter request */
@@ -364,7 +364,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
return -ENOMEM;
if (virt_scat) {
- virt_buf = kzalloc(buf_sz, GFP_KERNEL);
+ virt_buf = kzalloc(size, GFP_KERNEL);
if (!virt_buf) {
kfree(s_req);
return -ENOMEM;
@@ -374,7 +374,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
(u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
} else {
/* allocate sglist */
- s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
+ s_req->sgentries = kzalloc(size, GFP_KERNEL);
if (!s_req->sgentries) {
kfree(s_req);
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index bed0d337712d..f38ff6a6255e 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -1061,6 +1061,22 @@ static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar)
return;
}
+static int ath6kl_usb_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ /*
+ * cfg80211 suspend/WOW currently not supported for USB.
+ */
+ return 0;
+}
+
+static int ath6kl_usb_resume(struct ath6kl *ar)
+{
+ /*
+ * cfg80211 resume currently not supported for USB.
+ */
+ return 0;
+}
+
static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.diag_read32 = ath6kl_usb_diag_read32,
.diag_write32 = ath6kl_usb_diag_write32,
@@ -1074,6 +1090,8 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.pipe_map_service = ath6kl_usb_map_service_pipe,
.pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
.cleanup_scatter = ath6kl_usb_cleanup_scatter,
+ .suspend = ath6kl_usb_suspend,
+ .resume = ath6kl_usb_resume,
};
/* ath6kl usb driver registered functions */
@@ -1152,7 +1170,7 @@ static void ath6kl_usb_remove(struct usb_interface *interface)
#ifdef CONFIG_PM
-static int ath6kl_usb_suspend(struct usb_interface *interface,
+static int ath6kl_usb_pm_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct ath6kl_usb *device;
@@ -1162,7 +1180,7 @@ static int ath6kl_usb_suspend(struct usb_interface *interface,
return 0;
}
-static int ath6kl_usb_resume(struct usb_interface *interface)
+static int ath6kl_usb_pm_resume(struct usb_interface *interface)
{
struct ath6kl_usb *device;
device = usb_get_intfdata(interface);
@@ -1175,7 +1193,7 @@ static int ath6kl_usb_resume(struct usb_interface *interface)
return 0;
}
-static int ath6kl_usb_reset_resume(struct usb_interface *intf)
+static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf)
{
if (usb_get_intfdata(intf))
ath6kl_usb_remove(intf);
@@ -1184,9 +1202,9 @@ static int ath6kl_usb_reset_resume(struct usb_interface *intf)
#else
-#define ath6kl_usb_suspend NULL
-#define ath6kl_usb_resume NULL
-#define ath6kl_usb_reset_resume NULL
+#define ath6kl_usb_pm_suspend NULL
+#define ath6kl_usb_pm_resume NULL
+#define ath6kl_usb_pm_reset_resume NULL
#endif
@@ -1201,9 +1219,9 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
static struct usb_driver ath6kl_usb_driver = {
.name = "ath6kl_usb",
.probe = ath6kl_usb_probe,
- .suspend = ath6kl_usb_suspend,
- .resume = ath6kl_usb_resume,
- .reset_resume = ath6kl_usb_reset_resume,
+ .suspend = ath6kl_usb_pm_suspend,
+ .resume = ath6kl_usb_pm_resume,
+ .reset_resume = ath6kl_usb_pm_reset_resume,
.disconnect = ath6kl_usb_remove,
.id_table = ath6kl_usb_ids,
.supports_autosuspend = true,
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 3c2cbc9d6295..d491a3178986 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -28,7 +28,7 @@ config ATH9K
Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
of chipsets. For a specific list of supported external
cards, laptops that already ship with these cards and
- APs that come with these cards refer to to ath9k wiki
+ APs that come with these cards refer to ath9k wiki
products page:
http://wireless.kernel.org/en/users/Drivers/ath9k/products
@@ -84,14 +84,6 @@ config ATH9K_DFS_CERTIFIED
developed. At this point enabling this option won't do anything
except increase code size.
-config ATH9K_MAC_DEBUG
- bool "Atheros MAC statistics"
- depends on ATH9K_DEBUGFS
- default y
- ---help---
- This option enables collection of statistics for Rx/Tx status
- data and some other MAC related statistics
-
config ATH9K_LEGACY_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index d1ff3c246a12..072e4b531067 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -150,7 +150,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
free_irq(irq, sc);
err_free_hw:
ieee80211_free_hw(hw);
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -164,7 +163,6 @@ static int ath_ahb_remove(struct platform_device *pdev)
ath9k_deinit_device(sc);
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
- platform_set_drvdata(pdev, NULL);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 7ecd40f07a74..4994bea809eb 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
{ 5, 4, 1 }, /* lvl 5 */
{ 6, 5, 1 }, /* lvl 6 */
{ 7, 6, 1 }, /* lvl 7 */
- { 7, 6, 0 }, /* lvl 8 */
- { 7, 7, 0 } /* lvl 9 */
+ { 7, 7, 1 }, /* lvl 8 */
+ { 7, 8, 0 } /* lvl 9 */
};
#define ATH9K_ANI_OFDM_NUM_LEVEL \
ARRAY_SIZE(ofdm_level_table)
@@ -91,8 +91,8 @@ static const struct ani_cck_level_entry cck_level_table[] = {
{ 4, 0 }, /* lvl 4 */
{ 5, 0 }, /* lvl 5 */
{ 6, 0 }, /* lvl 6 */
- { 6, 0 }, /* lvl 7 (only for high rssi) */
- { 7, 0 } /* lvl 8 (only for high rssi) */
+ { 7, 0 }, /* lvl 7 (only for high rssi) */
+ { 8, 0 } /* lvl 8 (only for high rssi) */
};
#define ATH9K_ANI_CCK_NUM_LEVEL \
@@ -118,10 +118,10 @@ static void ath9k_ani_restart(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
- if (!DO_ANI(ah))
+ if (!ah->curchan)
return;
- aniState = &ah->curchan->ani;
+ aniState = &ah->ani;
aniState->listenTime = 0;
ENABLE_REGWRITE_BUFFER(ah);
@@ -143,7 +143,7 @@ static void ath9k_ani_restart(struct ath_hw *ah)
static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
bool scan)
{
- struct ar5416AniState *aniState = &ah->curchan->ani;
+ struct ar5416AniState *aniState = &ah->ani;
struct ath_common *common = ath9k_hw_common(ah);
const struct ani_ofdm_level_entry *entry_ofdm;
const struct ani_cck_level_entry *entry_cck;
@@ -177,10 +177,15 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
weak_sig = true;
- if (aniState->ofdmWeakSigDetect != weak_sig)
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- entry_ofdm->ofdm_weak_signal_on);
+ /*
+ * OFDM Weak signal detection is always enabled for AP mode.
+ */
+ if (ah->opmode != NL80211_IFTYPE_AP &&
+ aniState->ofdmWeakSigDetect != weak_sig) {
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ entry_ofdm->ofdm_weak_signal_on);
+ }
if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
@@ -195,10 +200,10 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
- if (!DO_ANI(ah))
+ if (!ah->curchan)
return;
- aniState = &ah->curchan->ani;
+ aniState = &ah->ani;
if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false);
@@ -210,7 +215,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
bool scan)
{
- struct ar5416AniState *aniState = &ah->curchan->ani;
+ struct ar5416AniState *aniState = &ah->ani;
struct ath_common *common = ath9k_hw_common(ah);
const struct ani_ofdm_level_entry *entry_ofdm;
const struct ani_cck_level_entry *entry_cck;
@@ -251,10 +256,10 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
- if (!DO_ANI(ah))
+ if (!ah->curchan)
return;
- aniState = &ah->curchan->ani;
+ aniState = &ah->ani;
if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1,
@@ -269,7 +274,7 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
- aniState = &ah->curchan->ani;
+ aniState = &ah->ani;
/* lower OFDM noise immunity */
if (aniState->ofdmNoiseImmunityLevel > 0 &&
@@ -292,12 +297,12 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
*/
void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
{
- struct ar5416AniState *aniState = &ah->curchan->ani;
+ struct ar5416AniState *aniState = &ah->ani;
struct ath9k_channel *chan = ah->curchan;
struct ath_common *common = ath9k_hw_common(ah);
int ofdm_nil, cck_nil;
- if (!DO_ANI(ah))
+ if (!ah->curchan)
return;
BUG_ON(aniState == NULL);
@@ -363,24 +368,13 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning);
ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning);
- /*
- * enable phy counters if hw supports or if not, enable phy
- * interrupts (so we can count each one)
- */
ath9k_ani_restart(ah);
-
- ENABLE_REGWRITE_BUFFER(ah);
-
- REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
- REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
-
- REGWRITE_BUFFER_FLUSH(ah);
}
static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ar5416AniState *aniState = &ah->curchan->ani;
+ struct ar5416AniState *aniState = &ah->ani;
u32 phyCnt1, phyCnt2;
int32_t listenTime;
@@ -415,10 +409,10 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
struct ath_common *common = ath9k_hw_common(ah);
u32 ofdmPhyErrRate, cckPhyErrRate;
- if (!DO_ANI(ah))
+ if (!ah->curchan)
return;
- aniState = &ah->curchan->ani;
+ aniState = &ah->ani;
if (!ath9k_hw_ani_read_counters(ah))
return;
@@ -490,32 +484,22 @@ EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
void ath9k_hw_ani_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- int i;
+ struct ar5416AniState *ani = &ah->ani;
ath_dbg(common, ANI, "Initialize ANI\n");
ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
-
ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
- for (i = 0; i < ARRAY_SIZE(ah->channels); i++) {
- struct ath9k_channel *chan = &ah->channels[i];
- struct ar5416AniState *ani = &chan->ani;
-
- ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
-
- ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
-
- ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
-
- ani->ofdmsTurn = true;
-
- ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
- ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
- ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
- }
+ ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+ ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+ ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
+ ani->ofdmsTurn = true;
+ ani->ofdmWeakSigDetect = true;
+ ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+ ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
/*
* since we expect some ongoing maintenance on the tables, let's sanity
@@ -524,9 +508,6 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ah->aniperiod = ATH9K_ANI_PERIOD;
ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL;
- if (ah->config.enable_ani)
- ah->proc_phyerr |= HAL_PROCESS_ANI;
-
ath9k_ani_restart(ah);
ath9k_enable_mib_counters(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index dddb1361039a..b54a3fb01883 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -17,32 +17,19 @@
#ifndef ANI_H
#define ANI_H
-#define HAL_PROCESS_ANI 0x00000001
-
-#define DO_ANI(ah) (((ah)->proc_phyerr & HAL_PROCESS_ANI) && ah->curchan)
-
#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
/* units are errors per second */
-#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
+#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
-/* units are errors per second */
#define ATH9K_ANI_OFDM_TRIG_LOW 400
#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
-/* units are errors per second */
#define ATH9K_ANI_CCK_TRIG_HIGH 600
-
-/* units are errors per second */
#define ATH9K_ANI_CCK_TRIG_LOW 300
-#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
-#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
-#define ATH9K_ANI_CCK_WEAK_SIG_THR false
-
#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
-
#define ATH9K_ANI_FIRSTEP_LVL 2
#define ATH9K_ANI_RSSI_THR_HIGH 40
@@ -53,10 +40,6 @@
/* in ms */
#define ATH9K_ANI_POLLINTERVAL 1000
-#define HAL_NOISE_IMMUNE_MAX 4
-#define HAL_SPUR_IMMUNE_MAX 7
-#define HAL_FIRST_STEP_MAX 2
-
#define ATH9K_SIG_FIRSTEP_SETTING_MIN 0
#define ATH9K_SIG_FIRSTEP_SETTING_MAX 20
#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
@@ -111,7 +94,7 @@ struct ar5416AniState {
u8 mrcCCK;
u8 spurImmunityLevel;
u8 firstepLevel;
- u8 ofdmWeakSigDetect;
+ bool ofdmWeakSigDetect;
u32 listenTime;
u32 ofdmPhyErrCount;
u32 cckPhyErrCount;
@@ -119,8 +102,6 @@ struct ar5416AniState {
};
struct ar5416Stats {
- u32 ast_ani_niup;
- u32 ast_ani_nidown;
u32 ast_ani_spurup;
u32 ast_ani_spurdown;
u32 ast_ani_ofdmon;
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 391da5ad6a99..1576d58291d4 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -610,7 +610,15 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
if (AR_SREV_9280_20_OR_LATER(ah)) {
- val = REG_READ(ah, AR_PCU_MISC_MODE2);
+ /*
+ * For AR9280 and above, there is a new feature that allows
+ * Multicast search based on both MAC Address and Key ID.
+ * By default, this feature is enabled. But since the driver
+ * is not using this feature, we switch it off; otherwise
+ * multicast search based on MAC addr only will fail.
+ */
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) &
+ (~AR_ADHOC_MCAST_KEYID_ENABLE);
if (!AR_SREV_9271(ah))
val &= ~AR_PCU_MISC_MODE2_HWWAR1;
@@ -931,7 +939,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
- struct ar5416AniState *aniState = &chan->ani;
+ struct ar5416AniState *aniState = &ah->ani;
s32 value, value2;
switch (cmd & ah->ani_function) {
@@ -1207,7 +1215,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
- struct ar5416AniState *aniState = &chan->ani;
+ struct ar5416AniState *aniState = &ah->ani;
struct ath9k_ani_default *iniDef;
u32 val;
@@ -1251,7 +1259,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
/* these levels just got reset to defaults by the INI */
aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
- aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->ofdmWeakSigDetect = true;
aniState->mrcCCK = false; /* not available on pre AR9003 */
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 830daa12feb6..8dc2d089cdef 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -38,10 +38,6 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
else
INIT_INI_ARRAY(&ah->iniPcieSerdes,
ar9280PciePhy_clkreq_always_on_L1_9280);
-#ifdef CONFIG_PM_SLEEP
- INIT_INI_ARRAY(&ah->iniPcieSerdesWow,
- ar9280PciePhy_awow);
-#endif
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index beb6162cf97c..4d18c66a6790 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -925,20 +925,6 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
{0x00004044, 0x00000000},
};
-static const u32 ar9280PciePhy_awow[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x9248fd00},
- {0x00004040, 0x24924924},
- {0x00004040, 0xa8000019},
- {0x00004040, 0x13160820},
- {0x00004040, 0xe5980560},
- {0x00004040, 0xc01dcffd},
- {0x00004040, 0x1aaabe41},
- {0x00004040, 0xbe105554},
- {0x00004040, 0x00043007},
- {0x00004044, 0x00000000},
-};
-
static const u32 ar9285Modes_9285_1_2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index e6b92ff265fd..d105e43d22e1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3563,14 +3563,24 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
int chain;
- u32 regval;
+ u32 regval, value, gpio;
static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
AR_PHY_SWITCH_CHAIN_0,
AR_PHY_SWITCH_CHAIN_1,
AR_PHY_SWITCH_CHAIN_2,
};
- u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+ if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0)) {
+ if (ah->config.xlna_gpio)
+ gpio = ah->config.xlna_gpio;
+ else
+ gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
+
+ ath9k_hw_cfg_output(ah, gpio,
+ AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
+ }
+
+ value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
@@ -3596,7 +3606,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
* 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE
* SWITCH_TABLE_COM_SPDT_WLAN_IDLE
*/
- if (AR_SREV_9462_20(ah) || AR_SREV_9565(ah)) {
+ if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
value = ar9003_switch_com_spdt_get(ah, is2ghz);
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
AR_SWITCH_TABLE_COM_SPDT_ALL, value);
@@ -3796,7 +3806,13 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
REG_RMW_FIELD(ah, ext_atten_reg[i],
AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
- value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
+ if (AR_SREV_9485(ah) &&
+ (ar9003_hw_get_rx_gain_idx(ah) == 0) &&
+ ah->config.xatten_margin_cfg)
+ value = 5;
+ else
+ value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
+
REG_RMW_FIELD(ah, ext_atten_reg[i],
AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
value);
@@ -4043,8 +4059,9 @@ static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
{
u32 data, ko, kg;
- if (!AR_SREV_9462_20(ah))
+ if (!AR_SREV_9462_20_OR_LATER(ah))
return;
+
ar9300_otp_read_word(ah, 1, &data);
ko = data & 0xff;
kg = (data >> 8) & 0xff;
@@ -4546,7 +4563,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
is2GHz);
for (i = 0; i < ar9300RateSize; i++) {
- ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+ ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
i, targetPowerValT2[i]);
}
}
@@ -4736,7 +4753,7 @@ tempslope:
AR_PHY_TPC_19_ALPHA_THERM, temp_slope);
}
- if (AR_SREV_9462_20(ah))
+ if (AR_SREV_9462_20_OR_LATER(ah))
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
AR_PHY_TPC_19_B1_ALPHA_THERM, temp_slope);
@@ -5272,7 +5289,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
return;
for (i = 0; i < ar9300RateSize; i++) {
- ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+ ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
i, targetPowerValT2[i]);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index a3523c969a3a..d402cb32283f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -24,6 +24,7 @@
#include "ar955x_1p0_initvals.h"
#include "ar9580_1p0_initvals.h"
#include "ar9462_2p0_initvals.h"
+#include "ar9462_2p1_initvals.h"
#include "ar9565_1p0_initvals.h"
 /* General hardware code for the AR9003 hardware family */
@@ -197,6 +198,31 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
ar9485_1_1_pcie_phy_clkreq_disable_L1);
+ } else if (AR_SREV_9462_21(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9462_2p1_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9462_2p1_mac_postamble);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9462_2p1_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9462_2p1_baseband_postamble);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9462_2p1_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9462_2p1_radio_postamble);
+ INIT_INI_ARRAY(&ah->ini_radio_post_sys2ant,
+ ar9462_2p1_radio_postamble_sys2ant);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9462_2p1_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9462_2p1_soc_postamble);
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_rx_gain);
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9462_2p1_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -407,6 +433,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_lowest_ob_db_tx_gain_table);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_2p1_modes_low_ob_db_tx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_modes_low_ob_db_tx_gain_table_2p0);
@@ -438,6 +467,9 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9550(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_2p1_modes_high_ob_db_tx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_modes_high_ob_db_tx_gain_table_2p0);
@@ -507,6 +539,12 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_mixed_ob_db_tx_gain_table);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_2p1_modes_mix_ob_db_tx_gain);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_modes_mix_ob_db_tx_gain_table_2p0);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
@@ -584,6 +622,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_rx_gain_table);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_rx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_rx_gain_table_2p0);
@@ -606,6 +647,9 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9485Common_wo_xlna_rx_gain_1_1);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_wo_xlna_rx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_wo_xlna_rx_gain_table_2p0);
@@ -627,9 +671,40 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
{
- if (AR_SREV_9462_20(ah))
+ if (AR_SREV_9462_21(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_mixed_rx_gain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
+ ar9462_2p1_baseband_core_mix_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+ ar9462_2p1_baseband_postamble_mix_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ ar9462_2p1_baseband_postamble_5g_xlna);
+ } else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_mixed_rx_gain_table_2p0);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
+ ar9462_2p0_baseband_core_mix_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+ ar9462_2p0_baseband_postamble_mix_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ ar9462_2p0_baseband_postamble_5g_xlna);
+ }
+}
+
+static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
+{
+ if (AR_SREV_9462_21(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_5g_xlna_only_rx_gain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ ar9462_2p1_baseband_postamble_5g_xlna);
+ } else if (AR_SREV_9462_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p0_5g_xlna_only_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ ar9462_2p0_baseband_postamble_5g_xlna);
+ }
}
static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
@@ -645,6 +720,9 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
case 2:
ar9003_rx_gain_table_mode2(ah);
break;
+ case 3:
+ ar9003_rx_gain_table_mode3(ah);
+ break;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 301bf72c53bf..5163abd3937c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -469,6 +469,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_status = 0;
rxs->rs_flags = 0;
+ rxs->flag = 0;
rxs->rs_datalen = rxsp->status2 & AR_DataLen;
rxs->rs_tstamp = rxsp->status3;
@@ -493,8 +494,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
- rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0;
- rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0;
+ rxs->flag |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
+ rxs->flag |= (rxsp->status4 & AR_2040) ? RX_FLAG_40MHZ : 0;
rxs->evm0 = rxsp->status6;
rxs->evm1 = rxsp->status7;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 09c1f9da67a0..6343cc91953e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -454,6 +454,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
if (accum_cnt <= thresh_accum_cnt)
continue;
+ max_index++;
+
/* sum(tx amplitude) */
accum_tx = ((data_L[i] >> 16) & 0xffff) |
((data_U[i] & 0x7ff) << 16);
@@ -468,20 +470,21 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
accum_tx <<= scale_factor;
accum_rx <<= scale_factor;
- x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
- scale_factor;
+ x_est[max_index] =
+ (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
+ scale_factor;
- Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
+ Y[max_index] =
+ ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
scale_factor) +
- (1 << scale_factor) * max_index + 16;
+ (1 << scale_factor) * i + 16;
if (accum_ang >= (1 << 26))
accum_ang -= 1 << 27;
- theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) /
- accum_cnt;
-
- max_index++;
+ theta[max_index] =
+ ((accum_ang * (1 << scale_factor)) + accum_cnt) /
+ accum_cnt;
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e1714d7c9eeb..1f694ab3cc78 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -735,22 +735,53 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
return -EINVAL;
}
+ /*
+ * SOC, MAC, BB, RADIO initvals.
+ */
for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
- if (i == ATH_INI_POST && AR_SREV_9462_20(ah))
+ if (i == ATH_INI_POST && AR_SREV_9462_20_OR_LATER(ah))
ar9003_hw_prog_ini(ah,
&ah->ini_radio_post_sys2ant,
modesIndex);
}
+ /*
+ * RXGAIN initvals.
+ */
REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
+
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ /*
+ * CUS217 mix LNA mode.
+ */
+ if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
+ 1, regWrites);
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+ modesIndex, regWrites);
+ }
+
+ /*
+ * 5G-XLNA
+ */
+ if ((ar9003_hw_get_rx_gain_idx(ah) == 2) ||
+ (ar9003_hw_get_rx_gain_idx(ah) == 3)) {
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ modesIndex, regWrites);
+ }
+ }
+
if (AR_SREV_9550(ah))
REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
regWrites);
+ /*
+ * TXGAIN initvals.
+ */
if (AR_SREV_9550(ah)) {
int modes_txgain_index;
@@ -772,8 +803,14 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniModesFastClock,
modesIndex, regWrites);
+ /*
+ * Clock frequency initvals.
+ */
REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
+ /*
+ * JAPAN regulatory.
+ */
if (chan->channel == 2484)
ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
@@ -905,7 +942,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
- struct ar5416AniState *aniState = &chan->ani;
+ struct ar5416AniState *aniState = &ah->ani;
+ int m1ThreshLow, m2ThreshLow;
+ int m1Thresh, m2Thresh;
+ int m2CountThr, m2CountThrLow;
+ int m1ThreshLowExt, m2ThreshLowExt;
+ int m1ThreshExt, m2ThreshExt;
s32 value, value2;
switch (cmd & ah->ani_function) {
@@ -919,6 +961,61 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
*/
u32 on = param ? 1 : 0;
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ goto skip_ws_det;
+
+ m1ThreshLow = on ?
+ aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+ m2ThreshLow = on ?
+ aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+ m1Thresh = on ?
+ aniState->iniDef.m1Thresh : m1Thresh_off;
+ m2Thresh = on ?
+ aniState->iniDef.m2Thresh : m2Thresh_off;
+ m2CountThr = on ?
+ aniState->iniDef.m2CountThr : m2CountThr_off;
+ m2CountThrLow = on ?
+ aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+ m1ThreshLowExt = on ?
+ aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+ m2ThreshLowExt = on ?
+ aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+ m1ThreshExt = on ?
+ aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+ m2ThreshExt = on ?
+ aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH,
+ m1Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH,
+ m2Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR,
+ m2CountThr);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
+ m1ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
+ m2ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH,
+ m1ThreshExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH,
+ m2ThreshExt);
+skip_ws_det:
if (on)
REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
@@ -1173,7 +1270,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
struct ath9k_ani_default *iniDef;
u32 val;
- aniState = &ah->curchan->ani;
+ aniState = &ah->ani;
iniDef = &aniState->iniDef;
ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
@@ -1214,7 +1311,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
/* these levels just got reset to defaults by the INI */
aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
- aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->ofdmWeakSigDetect = true;
aniState->mrcCCK = true;
}
@@ -1415,7 +1512,7 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
- if (AR_SREV_9462_20(ah))
+ if (AR_SREV_9462_20_OR_LATER(ah))
ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
modesIndex);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index e71774196c01..d4d39f305a0b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -351,6 +351,8 @@
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
+#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
+
/*
* AGC Field Definitions
*/
@@ -952,7 +954,7 @@
#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
-#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9462(ah) ? \
+#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(ah) ? \
0x280 : 0x240))
#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240)
#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff
@@ -1046,7 +1048,7 @@
#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
- (AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
+ (AR_SREV_9462_20_OR_LATER(_ah) ? 0x4c : 0x50))
#define AR_GLB_STATUS (AR_GLB_BASE + 0x48)
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 999ab08c34e6..092b9d412e7f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -78,7 +78,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -879,6 +879,69 @@ static const u32 ar9462_2p0_radio_postamble[][5] = {
{0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
};
+static const u32 ar9462_modes_mix_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
+ {0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
+ {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
+ {0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+};
+
static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
@@ -1449,4 +1512,284 @@ static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
{0x0000b1fc, 0x00000196},
};
+static const u32 ar9462_2p0_baseband_postamble_5g_xlna[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+};
+
+static const u32 ar9462_2p0_5g_xlna_only_rxgain[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p0_baseband_core_mix_rxgain[][2] = {
+ /* Addr allmodes */
+ {0x00009fd0, 0x0a2d6b93},
+};
+
+static const u32 ar9462_2p0_baseband_postamble_mix_rxgain[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
+ {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
+};
+
#endif /* INITVALS_9462_2P0_H */
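
The tables added above come in two shapes: five-column per-mode arrays like the one sketched earlier, and two-column { Addr, allmodes } arrays such as ar9462_2p0_baseband_core_mix_rxgain that carry a single value per register. An equally minimal, illustrative writer for the two-column form (again using the stand-in reg_write(), not the driver's real helper):

	/* Illustrative sketch only: apply an { addr, allmodes } initvals
	 * table by writing its single value column to every listed register. */
	static void prog_ini_allmodes(const u32 table[][2], unsigned int rows)
	{
		unsigned int i;

		for (i = 0; i < rows; i++)
			reg_write(table[i][0], table[i][1]);
	}
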
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
new file mode 100644
index 000000000000..4dbc294df7e3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -0,0 +1,1774 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9462_2P1_H
+#define INITVALS_9462_2P1_H
+
+/* AR9462 2.1 */
+
+static const u32 ar9462_2p1_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x000e0085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00001810, 0x0f000003},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00080000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00b00005},
+ {0x000080d8, 0x00400002},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486e00},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x99c00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48107b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9462_2p1_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9462_2p1_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a290},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x0d000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098e4, 0x01ffffff},
+ {0x000098e8, 0x01ffffff},
+ {0x000098ec, 0x01ffffff},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009bf0, 0x80000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x15262820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0xe4c555c2},
+ {0x00009e58, 0xfd857722},
+ {0x00009e5c, 0xe9198724},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x0a193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a454, 0x07000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00002037},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a6b0, 0x0000000a},
+ {0x0000a6b4, 0x00512c01},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a7f0, 0x80000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000abf0, 0x80000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b6b0, 0x0000000a},
+ {0x0000b6b4, 0x00000001},
+};
+
+static const u32 ar9462_2p1_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+ {0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+ {0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00},
+ {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
+};
+
+static const u32 ar9462_2p1_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016010, 0x6d820001},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x2699e04f},
+ {0x00016050, 0x6db6db6c},
+ {0x00016058, 0x6c200000},
+ {0x00016080, 0x000c0000},
+ {0x00016084, 0x9a68048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x1203040b},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc491},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92000000},
+ {0x000160b8, 0x0285dddc},
+ {0x000160bc, 0x02908888},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x0de6c1b0},
+ {0x00016100, 0x3fffbe04},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00200400},
+ {0x00016110, 0x00000000},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x050a0001},
+ {0x00016284, 0x3d841418},
+ {0x00016288, 0x00000000},
+ {0x0001628c, 0xe3000000},
+ {0x00016290, 0xa1005080},
+ {0x00016294, 0x00000020},
+ {0x00016298, 0x54a82900},
+ {0x00016340, 0x121e4276},
+ {0x00016344, 0x00300000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016410, 0x6c800001},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x4699e04f},
+ {0x00016450, 0x6db6db6c},
+ {0x00016500, 0x3fffbe04},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00200400},
+ {0x00016510, 0x00000000},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x000080c0},
+};
+
+static const u32 ar9462_2p1_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+ {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+ {0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+};
+
+static const u32 ar9462_2p1_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9462_2p1_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
+};
+
+static const u32 ar9462_2p1_radio_postamble_sys2ant[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9462_2p1_common_rx_gain[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p1_common_mixed_rx_gain[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p1_baseband_core_mix_rxgain[][2] = {
+ /* Addr allmodes */
+ {0x00009fd0, 0x0a2d6b93},
+};
+
+static const u32 ar9462_2p1_baseband_postamble_mix_rxgain[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
+ {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
+};
+
+static const u32 ar9462_2p1_baseband_postamble_5g_xlna[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+};
+
+static const u32 ar9462_2p1_common_wo_xlna_rx_gain[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p1_common_5g_xlna_only_rx_gain[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p1_modes_low_ob_db_tx_gain[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
+ {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+ {0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
+ {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9462_2p1_modes_high_ob_db_tx_gain[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
+ {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
+ {0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+ {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+ {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
+ {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9462_2p1_modes_mix_ob_db_tx_gain[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
+ {0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
+ {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
+ {0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+};
+
+static const u32 ar9462_2p1_modes_fast_clock[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x0372131c, 0x0372131c},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9462_2p1_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+#endif /* INITVALS_9462_2P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 42b03dc39d14..c1224b5a257b 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -296,6 +296,7 @@ struct ath_tx {
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
struct ath_descdma txdma;
struct ath_txq *txq_map[IEEE80211_NUM_ACS];
+ struct ath_txq *uapsdq;
u32 txq_max_pending[IEEE80211_NUM_ACS];
u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
};
@@ -343,6 +344,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
+void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct sk_buff *skb);
void ath_tx_tasklet(struct ath_softc *sc);
void ath_tx_edma_tasklet(struct ath_softc *sc);
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -353,6 +356,11 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_node *an);
+void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
/********/
/* VIFs */
@@ -623,6 +631,11 @@ void ath_ant_comb_update(struct ath_softc *sc);
/* Main driver core */
/********************/
+#define ATH9K_PCI_CUS198 0x0001
+#define ATH9K_PCI_CUS230 0x0002
+#define ATH9K_PCI_CUS217 0x0004
+#define ATH9K_PCI_WOW 0x0008
+
/*
* Default cache line size, in bytes.
* Used when PCI device not fully initialized by bootrom/BIOS
@@ -642,6 +655,7 @@ enum sc_op_flags {
SC_OP_ANI_RUN,
SC_OP_PRIM_STA_VIF,
SC_OP_HW_RESET,
+ SC_OP_SCANNING,
};
/* Powersave flags */
@@ -706,6 +720,7 @@ struct ath_softc {
unsigned int hw_busy_count;
unsigned long sc_flags;
+ unsigned long driver_data;
u32 intrstatus;
u16 ps_flags; /* PS_* */
@@ -755,7 +770,6 @@ struct ath_softc {
struct rchan *rfs_chan_spec_scan;
enum spectral_mode spectral_mode;
struct ath_spec_scan spec_config;
- int scanning;
#ifdef CONFIG_PM_SLEEP
atomic_t wow_got_bmiss_intr;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 2ff570f7f8ff..1a17732bb089 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -39,7 +39,8 @@ static void ath9k_beaconq_config(struct ath_softc *sc)
ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
- if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
+ sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
/* Always burst out beacon and CAB traffic. */
qi.tqi_aifs = 1;
qi.tqi_cwmin = 0;
@@ -107,23 +108,6 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
}
-static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_tx_control txctl;
-
- memset(&txctl, 0, sizeof(struct ath_tx_control));
- txctl.txq = sc->beacon.cabq;
-
- ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb);
-
- if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, XMIT, "CABQ TX failed\n");
- ieee80211_free_txskb(hw, skb);
- }
-}
-
static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -205,10 +189,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
- while (skb) {
- ath9k_tx_cabq(hw, skb);
- skb = ieee80211_get_buffered_bc(hw, vif);
- }
+ if (skb)
+ ath_tx_cabq(hw, vif, skb);
return bf;
}
@@ -273,7 +255,8 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc)
u64 tsf;
int slot;
- if (sc->sc_ah->opmode != NL80211_IFTYPE_AP) {
+ if (sc->sc_ah->opmode != NL80211_IFTYPE_AP &&
+ sc->sc_ah->opmode != NL80211_IFTYPE_MESH_POINT) {
ath_dbg(common, BEACON, "slot 0, tsf: %llu\n",
ath9k_hw_gettsf64(sc->sc_ah));
return 0;
@@ -765,10 +748,10 @@ void ath9k_set_beacon(struct ath_softc *sc)
switch (sc->sc_ah->opmode) {
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
ath9k_beacon_config_ap(sc, cur_conf);
break;
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_MESH_POINT:
ath9k_beacon_config_adhoc(sc, cur_conf);
break;
case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 7304e7585009..5e8219a91e25 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -387,7 +387,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (!caldata) {
chan->noisefloor = nf;
- ah->noise = ath9k_hw_getchan_noise(ah, chan);
return false;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index b37eb8d38811..87454f6c7b4f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -69,7 +69,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
return -EFAULT;
buf[len] = '\0';
- if (strict_strtoul(buf, 0, &mask))
+ if (kstrtoul(buf, 0, &mask))
return -EINVAL;
common->debug_mask = mask;
@@ -114,7 +114,7 @@ static ssize_t write_file_tx_chainmask(struct file *file, const char __user *use
return -EFAULT;
buf[len] = '\0';
- if (strict_strtoul(buf, 0, &mask))
+ if (kstrtoul(buf, 0, &mask))
return -EINVAL;
ah->txchainmask = mask;
@@ -157,7 +157,7 @@ static ssize_t write_file_rx_chainmask(struct file *file, const char __user *use
return -EFAULT;
buf[len] = '\0';
- if (strict_strtoul(buf, 0, &mask))
+ if (kstrtoul(buf, 0, &mask))
return -EINVAL;
ah->rxchainmask = mask;
@@ -173,25 +173,69 @@ static const struct file_operations fops_rx_chainmask = {
.llseek = default_llseek,
};
-static ssize_t read_file_disable_ani(struct file *file, char __user *user_buf,
+static ssize_t read_file_ani(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- char buf[32];
- unsigned int len;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned int len = 0, size = 1024;
+ ssize_t retval = 0;
+ char *buf;
- len = sprintf(buf, "%d\n", common->disable_ani);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (common->disable_ani) {
+ len += snprintf(buf + len, size - len, "%s: %s\n",
+ "ANI", "DISABLED");
+ goto exit;
+ }
+
+ len += snprintf(buf + len, size - len, "%15s: %s\n",
+ "ANI", "ENABLED");
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "ANI RESET", ah->stats.ast_ani_reset);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR UP", ah->stats.ast_ani_spurup);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR DOWN", ah->stats.ast_ani_spurup);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK ON", ah->stats.ast_ani_ccklow);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP UP", ah->stats.ast_ani_stepup);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
+ len += snprintf(buf + len, size - len, "%15s: %u\n",
+ "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
}
-static ssize_t write_file_disable_ani(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t write_file_ani(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- unsigned long disable_ani;
+ unsigned long ani;
char buf[32];
ssize_t len;
@@ -200,12 +244,15 @@ static ssize_t write_file_disable_ani(struct file *file,
return -EFAULT;
buf[len] = '\0';
- if (strict_strtoul(buf, 0, &disable_ani))
+ if (kstrtoul(buf, 0, &ani))
return -EINVAL;
- common->disable_ani = !!disable_ani;
+ if (ani < 0 || ani > 1)
+ return -EINVAL;
+
+ common->disable_ani = !ani;
- if (disable_ani) {
+ if (common->disable_ani) {
clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
ath_stop_ani(sc);
} else {
@@ -215,9 +262,9 @@ static ssize_t write_file_disable_ani(struct file *file,
return count;
}
-static const struct file_operations fops_disable_ani = {
- .read = read_file_disable_ani,
- .write = write_file_disable_ani,
+static const struct file_operations fops_ani = {
+ .read = read_file_ani,
+ .write = write_file_ani,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
@@ -253,7 +300,7 @@ static ssize_t write_file_ant_diversity(struct file *file,
goto exit;
buf[len] = '\0';
- if (strict_strtoul(buf, 0, &antenna_diversity))
+ if (kstrtoul(buf, 0, &antenna_diversity))
return -EINVAL;
common->antenna_diversity = !!antenna_diversity;
@@ -738,8 +785,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, struct ath_txq *txq,
unsigned int flags)
{
-#define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\
- [sc->debug.tsidx].c)
int qnum = txq->axq_qnum;
TX_STAT_INC(qnum, tx_pkts_all);
@@ -771,37 +816,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
TX_STAT_INC(qnum, data_underrun);
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(qnum, delim_underrun);
-
-#ifdef CONFIG_ATH9K_MAC_DEBUG
- spin_lock(&sc->debug.samp_lock);
- TX_SAMP_DBG(jiffies) = jiffies;
- TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0;
- TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1;
- TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2;
- TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0;
- TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1;
- TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2;
- TX_SAMP_DBG(rateindex) = ts->ts_rateindex;
- TX_SAMP_DBG(isok) = !!(ts->ts_status & ATH9K_TXERR_MASK);
- TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry;
- TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry;
- TX_SAMP_DBG(rssi) = ts->ts_rssi;
- TX_SAMP_DBG(tid) = ts->tid;
- TX_SAMP_DBG(qid) = ts->qid;
-
- if (ts->ts_flags & ATH9K_TX_BA) {
- TX_SAMP_DBG(ba_low) = ts->ba_low;
- TX_SAMP_DBG(ba_high) = ts->ba_high;
- } else {
- TX_SAMP_DBG(ba_low) = 0;
- TX_SAMP_DBG(ba_high) = 0;
- }
-
- sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES;
- spin_unlock(&sc->debug.samp_lock);
-#endif
-
-#undef TX_SAMP_DBG
}
static const struct file_operations fops_xmit = {
@@ -915,8 +929,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
-#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
- [sc->debug.rsidx].c)
RX_STAT_INC(rx_pkts_all);
sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
@@ -940,27 +952,7 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
RX_PHY_ERR_INC(rs->rs_phyerr);
}
-#ifdef CONFIG_ATH9K_MAC_DEBUG
- spin_lock(&sc->debug.samp_lock);
- RX_SAMP_DBG(jiffies) = jiffies;
- RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0;
- RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1;
- RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2;
- RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0;
- RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1;
- RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2;
- RX_SAMP_DBG(antenna) = rs->rs_antenna;
- RX_SAMP_DBG(rssi) = rs->rs_rssi;
- RX_SAMP_DBG(rate) = rs->rs_rate;
- RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon;
-
- sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES;
- spin_unlock(&sc->debug.samp_lock);
-
-#endif
-
#undef RX_PHY_ERR_INC
-#undef RX_SAMP_DBG
}
static const struct file_operations fops_recv = {
@@ -1278,7 +1270,7 @@ static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
return -EFAULT;
buf[len] = '\0';
- if (strict_strtoul(buf, 0, &regidx))
+ if (kstrtoul(buf, 0, &regidx))
return -EINVAL;
sc->debug.regidx = regidx;
@@ -1323,7 +1315,7 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
return -EFAULT;
buf[len] = '\0';
- if (strict_strtoul(buf, 0, &regval))
+ if (kstrtoul(buf, 0, &regval))
return -EINVAL;
ath9k_ps_wakeup(sc);
@@ -1485,283 +1477,6 @@ static const struct file_operations fops_modal_eeprom = {
.llseek = default_llseek,
};
-#ifdef CONFIG_ATH9K_MAC_DEBUG
-
-void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
-{
-#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c)
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- unsigned long flags;
- int i;
-
- ath9k_ps_wakeup(sc);
-
- spin_lock_bh(&sc->debug.samp_lock);
-
- spin_lock_irqsave(&common->cc_lock, flags);
- ath_hw_cycle_counters_update(common);
-
- ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles;
- ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy;
- ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame;
- ATH_SAMP_DBG(cc.tx_frame) = common->cc_ani.tx_frame;
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ATH_SAMP_DBG(noise) = ah->noise;
-
- REG_WRITE_D(ah, AR_MACMISC,
- ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
- (AR_MACMISC_MISC_OBS_BUS_1 <<
- AR_MACMISC_MISC_OBS_BUS_MSB_S)));
-
- for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
- ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah,
- AR_DMADBG_0 + (i * sizeof(u32)));
-
- ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1);
- ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR);
-
- memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist,
- sizeof(ATH_SAMP_DBG(nfCalHist)));
-
- sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES;
- spin_unlock_bh(&sc->debug.samp_lock);
- ath9k_ps_restore(sc);
-
-#undef ATH_SAMP_DBG
-}
-
-static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
-{
-#define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c
- struct ath_softc *sc = inode->i_private;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
- struct ath_dbg_bb_mac_samp *bb_mac_samp;
- struct ath9k_nfcal_hist *h;
- int i, j, qcuOffset = 0, dcuOffset = 0;
- u32 *qcuBase, *dcuBase, size = 30000, len = 0;
- u32 sampidx = 0;
- u8 *buf;
- u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
- u8 nread;
-
- if (test_bit(SC_OP_INVALID, &sc->sc_flags))
- return -EAGAIN;
-
- buf = vmalloc(size);
- if (!buf)
- return -ENOMEM;
- bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
- if (!bb_mac_samp) {
- vfree(buf);
- return -ENOMEM;
- }
- /* Account the current state too */
- ath9k_debug_samp_bb_mac(sc);
-
- spin_lock_bh(&sc->debug.samp_lock);
- memcpy(bb_mac_samp, sc->debug.bb_mac_samp,
- sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
- len += snprintf(buf + len, size - len,
- "Current Sample Index: %d\n", sc->debug.sampidx);
- spin_unlock_bh(&sc->debug.samp_lock);
-
- len += snprintf(buf + len, size - len,
- "Raw DMA Debug Dump:\n");
- len += snprintf(buf + len, size - len, "Sample |\t");
- for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
- len += snprintf(buf + len, size - len, " DMA Reg%d |\t", i);
- len += snprintf(buf + len, size - len, "\n");
-
- for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
- len += snprintf(buf + len, size - len, "%d\t", sampidx);
-
- for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
- len += snprintf(buf + len, size - len, " %08x\t",
- ATH_SAMP_DBG(dma_dbg_reg_vals[i]));
- len += snprintf(buf + len, size - len, "\n");
- }
- len += snprintf(buf + len, size - len, "\n");
-
- len += snprintf(buf + len, size - len,
- "Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
- for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
- qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
- dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
-
- for (i = 0; i < ATH9K_NUM_QUEUES; i++,
- qcuOffset += 4, dcuOffset += 5) {
- if (i == 8) {
- qcuOffset = 0;
- qcuBase++;
- }
-
- if (i == 6) {
- dcuOffset = 0;
- dcuBase++;
- }
- if (!sc->debug.stats.txstats[i].queued)
- continue;
-
- len += snprintf(buf + len, size - len,
- "%4d %7d %2x %1x %2x %2x\n",
- sampidx, i,
- (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
- (*qcuBase & (0x8 << qcuOffset)) >>
- (qcuOffset + 3),
- ATH_SAMP_DBG(dma_dbg_reg_vals[2]) &
- (0x7 << (i * 3)) >> (i * 3),
- (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
- }
- len += snprintf(buf + len, size - len, "\n");
- }
- len += snprintf(buf + len, size - len,
- "samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp "
- "ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 "
- "txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n");
-
- for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
- qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
- dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
-
- len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx,
- (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18,
- (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22);
- len += snprintf(buf + len, size - len, "%7x %8x ",
- (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26,
- (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3));
- len += snprintf(buf + len, size - len, "%7x %7x ",
- (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 25,
- (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27);
- len += snprintf(buf + len, size - len, "%7d %12d ",
- (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2,
- (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10);
- len += snprintf(buf + len, size - len, "%12d %12d ",
- (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11,
- (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12);
- len += snprintf(buf + len, size - len, "%12d %12d ",
- (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13,
- (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17);
- len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n",
- ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr));
- }
-
- len += snprintf(buf + len, size - len,
- "Sample ChNoise Chain privNF #Reading Readings\n");
- for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
- h = ATH_SAMP_DBG(nfCalHist);
- if (!ATH_SAMP_DBG(noise))
- continue;
-
- for (i = 0; i < NUM_NF_READINGS; i++) {
- if (!(chainmask & (1 << i)) ||
- ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
- continue;
-
- nread = AR_PHY_CCA_FILTERWINDOW_LENGTH -
- h[i].invalidNFcount;
- len += snprintf(buf + len, size - len,
- "%4d %5d %4d\t %d\t %d\t",
- sampidx, ATH_SAMP_DBG(noise),
- i, h[i].privNF, nread);
- for (j = 0; j < nread; j++)
- len += snprintf(buf + len, size - len,
- " %d", h[i].nfCalBuffer[j]);
- len += snprintf(buf + len, size - len, "\n");
- }
- }
- len += snprintf(buf + len, size - len, "\nCycle counters:\n"
- "Sample Total Rxbusy Rxframes Txframes\n");
- for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
- if (!ATH_SAMP_DBG(cc.cycles))
- continue;
- len += snprintf(buf + len, size - len,
- "%4d %08x %08x %08x %08x\n",
- sampidx, ATH_SAMP_DBG(cc.cycles),
- ATH_SAMP_DBG(cc.rx_busy),
- ATH_SAMP_DBG(cc.rx_frame),
- ATH_SAMP_DBG(cc.tx_frame));
- }
-
- len += snprintf(buf + len, size - len, "Tx status Dump :\n");
- len += snprintf(buf + len, size - len,
- "Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb "
- "isok rts_fail data_fail rate tid qid "
- "ba_low ba_high tx_before(ms)\n");
- for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
- for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
- if (!ATH_SAMP_DBG(ts[i].jiffies))
- continue;
- len += snprintf(buf + len, size - len, "%-14d"
- "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-4d %-8d "
- "%-9d %-4d %-3d %-3d %08x %08x %-11d\n",
- sampidx,
- ATH_SAMP_DBG(ts[i].rssi_ctl0),
- ATH_SAMP_DBG(ts[i].rssi_ctl1),
- ATH_SAMP_DBG(ts[i].rssi_ctl2),
- ATH_SAMP_DBG(ts[i].rssi_ext0),
- ATH_SAMP_DBG(ts[i].rssi_ext1),
- ATH_SAMP_DBG(ts[i].rssi_ext2),
- ATH_SAMP_DBG(ts[i].rssi),
- ATH_SAMP_DBG(ts[i].isok),
- ATH_SAMP_DBG(ts[i].rts_fail_cnt),
- ATH_SAMP_DBG(ts[i].data_fail_cnt),
- ATH_SAMP_DBG(ts[i].rateindex),
- ATH_SAMP_DBG(ts[i].tid),
- ATH_SAMP_DBG(ts[i].qid),
- ATH_SAMP_DBG(ts[i].ba_low),
- ATH_SAMP_DBG(ts[i].ba_high),
- jiffies_to_msecs(jiffies -
- ATH_SAMP_DBG(ts[i].jiffies)));
- }
- }
-
- len += snprintf(buf + len, size - len, "Rx status Dump :\n");
- len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 "
- "ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n");
- for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
- for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
- if (!ATH_SAMP_DBG(rs[i].jiffies))
- continue;
- len += snprintf(buf + len, size - len, "%-14d"
- "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-9s %-2d %02x %-13d\n",
- sampidx,
- ATH_SAMP_DBG(rs[i].rssi_ctl0),
- ATH_SAMP_DBG(rs[i].rssi_ctl1),
- ATH_SAMP_DBG(rs[i].rssi_ctl2),
- ATH_SAMP_DBG(rs[i].rssi_ext0),
- ATH_SAMP_DBG(rs[i].rssi_ext1),
- ATH_SAMP_DBG(rs[i].rssi_ext2),
- ATH_SAMP_DBG(rs[i].rssi),
- ATH_SAMP_DBG(rs[i].is_mybeacon) ?
- "True" : "False",
- ATH_SAMP_DBG(rs[i].antenna),
- ATH_SAMP_DBG(rs[i].rate),
- jiffies_to_msecs(jiffies -
- ATH_SAMP_DBG(rs[i].jiffies)));
- }
- }
-
- vfree(bb_mac_samp);
- file->private_data = buf;
-
- return 0;
-#undef ATH_SAMP_DBG
-}
-
-static const struct file_operations fops_samps = {
- .open = open_file_bb_mac_samps,
- .read = ath9k_debugfs_read_buf,
- .release = ath9k_debugfs_release_buf,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-#endif
-
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2059,8 +1774,8 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
- debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_disable_ani);
+ debugfs_create_file("ani", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_ani);
debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
&sc->sc_ah->config.enable_paprd);
debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
@@ -2095,11 +1810,6 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc,
&fops_spectral_fft_period);
-
-#ifdef CONFIG_ATH9K_MAC_DEBUG
- debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_samps);
-#endif
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 9d49aab8b989..fc679198a0f3 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -251,56 +251,10 @@ struct ath_stats {
u32 reset[__RESET_TYPE_MAX];
};
-#define ATH_DBG_MAX_SAMPLES 10
-struct ath_dbg_bb_mac_samp {
- u32 dma_dbg_reg_vals[ATH9K_NUM_DMA_DEBUG_REGS];
- u32 pcu_obs, pcu_cr, noise;
- struct {
- u64 jiffies;
- int8_t rssi_ctl0;
- int8_t rssi_ctl1;
- int8_t rssi_ctl2;
- int8_t rssi_ext0;
- int8_t rssi_ext1;
- int8_t rssi_ext2;
- int8_t rssi;
- bool isok;
- u8 rts_fail_cnt;
- u8 data_fail_cnt;
- u8 rateindex;
- u8 qid;
- u8 tid;
- u32 ba_low;
- u32 ba_high;
- } ts[ATH_DBG_MAX_SAMPLES];
- struct {
- u64 jiffies;
- int8_t rssi_ctl0;
- int8_t rssi_ctl1;
- int8_t rssi_ctl2;
- int8_t rssi_ext0;
- int8_t rssi_ext1;
- int8_t rssi_ext2;
- int8_t rssi;
- bool is_mybeacon;
- u8 antenna;
- u8 rate;
- } rs[ATH_DBG_MAX_SAMPLES];
- struct ath_cycle_counters cc;
- struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
-};
-
struct ath9k_debug {
struct dentry *debugfs_phy;
u32 regidx;
struct ath_stats stats;
-#ifdef CONFIG_ATH9K_MAC_DEBUG
- spinlock_t samp_lock;
- struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES];
- u8 sampidx;
- u8 tsidx;
- u8 rsidx;
-#endif
};
int ath9k_init_debug(struct ath_hw *ah);
@@ -364,17 +318,4 @@ static inline void ath_debug_stat_rx(struct ath_softc *sc,
#endif /* CONFIG_ATH9K_DEBUGFS */
-#ifdef CONFIG_ATH9K_MAC_DEBUG
-
-void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
-
-#else
-
-static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
-{
-}
-
-#endif
-
-
#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index b7611b7bbe43..3c6e4138a95d 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -96,7 +96,7 @@ static ssize_t write_file_dfs(struct file *file, const char __user *user_buf,
return -EFAULT;
buf[len] = '\0';
- if (strict_strtoul(buf, 0, &val))
+ if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val == DFS_STATS_RESET_MAGIC)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f5dda84176c3..5205a3625e84 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -234,10 +234,15 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
struct sk_buff *skb;
while ((skb = __skb_dequeue(queue)) != NULL) {
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ int ln = skb->len;
+#endif
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
skb, txok);
- if (txok)
+ if (txok) {
TX_STAT_INC(skb_success);
+ TX_STAT_ADD(skb_success_bytes, ln);
+ }
else
TX_STAT_INC(skb_failed);
}
@@ -620,6 +625,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
err:
for (i = 0; i < pool_index; i++) {
+ RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len);
ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
skb_pool[i]->len, USB_WLAN_RX_PIPE);
RX_STAT_INC(skb_completed);
@@ -1076,7 +1082,7 @@ static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
struct device *dev = &hif_dev->udev->dev;
struct device *parent = dev->parent;
- complete(&hif_dev->fw_done);
+ complete_all(&hif_dev->fw_done);
if (parent)
device_lock(parent);
@@ -1125,7 +1131,7 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
release_firmware(fw);
hif_dev->flags |= HIF_USB_READY;
- complete(&hif_dev->fw_done);
+ complete_all(&hif_dev->fw_done);
return;
@@ -1289,7 +1295,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
- if (!unplugged && (hif_dev->flags & HIF_USB_START))
+ /* If firmware was loaded we should drop it and
+ * go back to the first stage bootloader. */
+ if (!unplugged && (hif_dev->flags & HIF_USB_READY))
ath9k_hif_usb_reboot(udev);
kfree(hif_dev);
@@ -1310,7 +1318,10 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface,
if (!(hif_dev->flags & HIF_USB_START))
ath9k_htc_suspend(hif_dev->htc_handle);
- ath9k_hif_usb_dealloc_urbs(hif_dev);
+ wait_for_completion(&hif_dev->fw_done);
+
+ if (hif_dev->flags & HIF_USB_READY)
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index d3b099d7898b..055d7c25e090 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -142,6 +142,7 @@ struct ath9k_htc_target_aggr {
#define WLAN_RC_40_FLAG 0x02
#define WLAN_RC_SGI_FLAG 0x04
#define WLAN_RC_HT_FLAG 0x08
+#define ATH_RC_TX_STBC_FLAG 0x20
struct ath9k_htc_rateset {
u8 rs_nrates;
@@ -208,6 +209,9 @@ struct ath9k_htc_target_rx_stats {
case NL80211_IFTYPE_AP: \
_priv->num_ap_vif++; \
break; \
+ case NL80211_IFTYPE_MESH_POINT: \
+ _priv->num_mbss_vif++; \
+ break; \
default: \
break; \
} \
@@ -224,6 +228,9 @@ struct ath9k_htc_target_rx_stats {
case NL80211_IFTYPE_AP: \
_priv->num_ap_vif--; \
break; \
+ case NL80211_IFTYPE_MESH_POINT: \
+ _priv->num_mbss_vif--; \
+ break; \
default: \
break; \
} \
@@ -317,7 +324,9 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
+#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
+#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c += a)
#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
@@ -330,6 +339,7 @@ struct ath_tx_stats {
u32 buf_completed;
u32 skb_queued;
u32 skb_success;
+ u32 skb_success_bytes;
u32 skb_failed;
u32 cab_queued;
u32 queue_stats[IEEE80211_NUM_ACS];
@@ -338,6 +348,7 @@ struct ath_tx_stats {
struct ath_rx_stats {
u32 skb_allocated;
u32 skb_completed;
+ u32 skb_completed_bytes;
u32 skb_dropped;
u32 err_crc;
u32 err_decrypt_crc;
@@ -355,10 +366,20 @@ struct ath9k_debug {
struct ath_rx_stats rx_stats;
};
+void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
#else
#define TX_STAT_INC(c) do { } while (0)
+#define TX_STAT_ADD(c, a) do { } while (0)
#define RX_STAT_INC(c) do { } while (0)
+#define RX_STAT_ADD(c, a) do { } while (0)
#define CAB_STAT_INC do { } while (0)
#define TX_QSTAT_INC(c) do { } while (0)
@@ -450,6 +471,7 @@ struct ath9k_htc_priv {
u8 sta_slot;
u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
u8 num_ibss_vif;
+ u8 num_mbss_vif;
u8 num_sta_vif;
u8 num_sta_assoc_vif;
u8 num_ap_vif;
@@ -575,6 +597,8 @@ bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
+struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv);
+
#ifdef CONFIG_MAC80211_LEDS
void ath9k_init_leds(struct ath9k_htc_priv *priv);
void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index f13f458dd656..e0c03bd64182 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -28,7 +28,8 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
ath9k_hw_get_txq_props(ah, priv->beaconq, &qi);
- if (priv->ah->opmode == NL80211_IFTYPE_AP) {
+ if (priv->ah->opmode == NL80211_IFTYPE_AP ||
+ priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) {
qi.tqi_aifs = 1;
qi.tqi_cwmin = 0;
qi.tqi_cwmax = 0;
@@ -628,6 +629,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
case NL80211_IFTYPE_ADHOC:
ath9k_htc_beacon_config_adhoc(priv, cur_conf);
break;
+ case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
ath9k_htc_beacon_config_ap(priv, cur_conf);
break;
@@ -649,6 +651,7 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
case NL80211_IFTYPE_ADHOC:
ath9k_htc_beacon_config_adhoc(priv, cur_conf);
break;
+ case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
ath9k_htc_beacon_config_ap(priv, cur_conf);
break;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index 87110de577ef..c1b45e2f8481 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -471,7 +471,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
return -EFAULT;
buf[len] = '\0';
- if (strict_strtoul(buf, 0, &mask))
+ if (kstrtoul(buf, 0, &mask))
return -EINVAL;
common->debug_mask = mask;
@@ -496,21 +496,7 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
ssize_t retval = 0;
char *buf;
- /*
- * This can be done since all the 3 EEPROM families have the
- * same base header upto a certain point, and we are interested in
- * the data only upto that point.
- */
-
- if (AR_SREV_9271(priv->ah))
- pBase = (struct base_eep_header *)
- &priv->ah->eeprom.map4k.baseEepHeader;
- else if (priv->ah->hw_version.usbdev == AR9280_USB)
- pBase = (struct base_eep_header *)
- &priv->ah->eeprom.def.baseEepHeader;
- else if (priv->ah->hw_version.usbdev == AR9287_USB)
- pBase = (struct base_eep_header *)
- &priv->ah->eeprom.map9287.baseEepHeader;
+ pBase = ath9k_htc_get_eeprom_base(priv);
if (pBase == NULL) {
ath_err(common, "Unknown EEPROM type\n");
@@ -916,6 +902,87 @@ static const struct file_operations fops_modal_eeprom = {
.llseek = default_llseek,
};
+
+/* Ethtool support for get-stats */
+#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
+static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+
+ AMKSTR(d_tx_pkts),
+
+ "d_rx_crc_err",
+ "d_rx_decrypt_crc_err",
+ "d_rx_phy_err",
+ "d_rx_mic_err",
+ "d_rx_pre_delim_crc_err",
+ "d_rx_post_delim_crc_err",
+ "d_rx_decrypt_busy_err",
+
+ "d_rx_phyerr_radar",
+ "d_rx_phyerr_ofdm_timing",
+ "d_rx_phyerr_cck_timing",
+
+};
+#define ATH9K_HTC_SSTATS_LEN ARRAY_SIZE(ath9k_htc_gstrings_stats)
+
+void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath9k_htc_gstrings_stats,
+ sizeof(ath9k_htc_gstrings_stats));
+}
+
+int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH9K_HTC_SSTATS_LEN;
+ return 0;
+}
+
+#define STXBASE priv->debug.tx_stats
+#define SRXBASE priv->debug.rx_stats
+#define ASTXQ(a) \
+ data[i++] = STXBASE.a[IEEE80211_AC_BE]; \
+ data[i++] = STXBASE.a[IEEE80211_AC_BK]; \
+ data[i++] = STXBASE.a[IEEE80211_AC_VI]; \
+ data[i++] = STXBASE.a[IEEE80211_AC_VO]
+
+void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int i = 0;
+
+ data[i++] = STXBASE.skb_success;
+ data[i++] = STXBASE.skb_success_bytes;
+ data[i++] = SRXBASE.skb_completed;
+ data[i++] = SRXBASE.skb_completed_bytes;
+
+ ASTXQ(queue_stats);
+
+ data[i++] = SRXBASE.err_crc;
+ data[i++] = SRXBASE.err_decrypt_crc;
+ data[i++] = SRXBASE.err_phy;
+ data[i++] = SRXBASE.err_mic;
+ data[i++] = SRXBASE.err_pre_delim;
+ data[i++] = SRXBASE.err_post_delim;
+ data[i++] = SRXBASE.err_decrypt_busy;
+
+ data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_RADAR];
+ data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_OFDM_TIMING];
+ data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_CCK_TIMING];
+
+ WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
+}
+
+
int ath9k_htc_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index a47f5e05fc04..c3676bf1d6c4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -517,6 +517,9 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
tx_streams, rx_streams);
+ if (tx_streams >= 2)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+
if (tx_streams != rx_streams) {
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
ht_info->mcs.tx_params |= ((tx_streams - 1) <<
@@ -698,6 +701,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
{ .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) },
{ .max = 2, .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
BIT(NL80211_IFTYPE_P2P_GO) },
};
@@ -712,6 +718,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct base_eep_header *pBase;
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
@@ -721,6 +728,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
hw->wiphy->interface_modes =
@@ -728,7 +736,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT);
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
hw->wiphy->iface_combinations = &if_comb;
hw->wiphy->n_iface_combinations = 1;
@@ -765,6 +774,12 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
&priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}
+ pBase = ath9k_htc_get_eeprom_base(priv);
+ if (pBase) {
+ hw->wiphy->available_antennas_rx = pBase->rxMask;
+ hw->wiphy->available_antennas_tx = pBase->txMask;
+ }
+
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
@@ -846,6 +861,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
if (error != 0)
goto err_rx;
+ ath9k_hw_disable(priv->ah);
#ifdef CONFIG_MAC80211_LEDS
/* must be initialized before ieee80211_register_hw */
priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 62f1b7636c92..5c1bec18c9e3 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -113,7 +113,9 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath9k_htc_priv *priv = data;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon)
+ if ((vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ bss_conf->enable_beacon)
priv->reconfig_beacon = true;
if (bss_conf->assoc) {
@@ -180,6 +182,8 @@ static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
priv->ah->opmode = NL80211_IFTYPE_ADHOC;
else if (priv->num_ap_vif)
priv->ah->opmode = NL80211_IFTYPE_AP;
+ else if (priv->num_mbss_vif)
+ priv->ah->opmode = NL80211_IFTYPE_MESH_POINT;
else
priv->ah->opmode = NL80211_IFTYPE_STATION;
@@ -623,6 +627,8 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
trate->rates.ht_rates.rs_nrates = j;
caps = WLAN_RC_HT_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ caps |= ATH_RC_TX_STBC_FLAG;
if (sta->ht_cap.mcs.rx_mask[1])
caps |= WLAN_RC_DS_FLAG;
if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
@@ -810,8 +816,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
}
/* Verify whether we must check ANI */
- if (ah->config.enable_ani &&
- (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -841,8 +846,7 @@ set_timer:
* short calibration and long calibration.
*/
cal_interval = ATH_LONG_CALINTERVAL;
- if (ah->config.enable_ani)
- cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+ cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
@@ -1052,6 +1056,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_AP:
hvif.opmode = HTC_M_HOSTAP;
break;
+ case NL80211_IFTYPE_MESH_POINT:
+ hvif.opmode = HTC_M_WDS; /* close enough */
+ break;
default:
ath_err(common,
"Interface type %d not yet supported\n", vif->type);
@@ -1084,6 +1091,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
INC_VIF(priv, vif->type);
if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT) ||
(vif->type == NL80211_IFTYPE_ADHOC))
ath9k_htc_assign_bslot(priv, vif);
@@ -1134,6 +1142,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
DEC_VIF(priv, vif->type);
if ((vif->type == NL80211_IFTYPE_AP) ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
(vif->type == NL80211_IFTYPE_ADHOC))
ath9k_htc_remove_bslot(priv, vif);
@@ -1525,9 +1534,10 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
/*
* Disable SWBA interrupt only if there are no
- * AP/IBSS interfaces.
+ * concurrent AP/mesh or IBSS interfaces.
*/
- if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
+ if ((priv->num_ap_vif + priv->num_mbss_vif <= 1) ||
+ priv->num_ibss_vif) {
ath_dbg(common, CONFIG,
"Beacon disabled for BSS: %pM\n",
bss_conf->bssid);
@@ -1538,12 +1548,15 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_INT) {
/*
- * Reset the HW TSF for the first AP interface.
+ * Reset the HW TSF for the first AP or mesh interface.
*/
- if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
- (priv->nvifs == 1) &&
- (priv->num_ap_vif == 1) &&
- (vif->type == NL80211_IFTYPE_AP)) {
+ if (priv->nvifs == 1 &&
+ ((priv->ah->opmode == NL80211_IFTYPE_AP &&
+ vif->type == NL80211_IFTYPE_AP &&
+ priv->num_ap_vif == 1) ||
+ (priv->ah->opmode == NL80211_IFTYPE_MESH_POINT &&
+ vif->type == NL80211_IFTYPE_MESH_POINT &&
+ priv->num_mbss_vif == 1))) {
set_bit(OP_TSF_RESET, &priv->op_flags);
}
ath_dbg(common, CONFIG,
@@ -1761,6 +1774,43 @@ static int ath9k_htc_get_stats(struct ieee80211_hw *hw,
return 0;
}
+struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv)
+{
+ struct base_eep_header *pBase = NULL;
+ /*
+ * This can be done since all the 3 EEPROM families have the
+ * same base header upto a certain point, and we are interested in
+ * the data only upto that point.
+ */
+
+ if (AR_SREV_9271(priv->ah))
+ pBase = (struct base_eep_header *)
+ &priv->ah->eeprom.map4k.baseEepHeader;
+ else if (priv->ah->hw_version.usbdev == AR9280_USB)
+ pBase = (struct base_eep_header *)
+ &priv->ah->eeprom.def.baseEepHeader;
+ else if (priv->ah->hw_version.usbdev == AR9287_USB)
+ pBase = (struct base_eep_header *)
+ &priv->ah->eeprom.map9287.baseEepHeader;
+ return pBase;
+}
+
+
+static int ath9k_htc_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
+ u32 *rx_ant)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct base_eep_header *pBase = ath9k_htc_get_eeprom_base(priv);
+ if (pBase) {
+ *tx_ant = pBase->txMask;
+ *rx_ant = pBase->rxMask;
+ } else {
+ *tx_ant = 0;
+ *rx_ant = 0;
+ }
+ return 0;
+}
+
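The get_antenna callback above hands mac80211 the raw txMask/rxMask chainmask bitmaps from the EEPROM base header, where each set bit corresponds to one chain. A minimal, stand-alone sketch of how such a mask maps to a chain count; the helper name and mask values are illustrative, not part of the driver:

#include <stdio.h>

/* Illustrative only: count the chains in a tx/rx chainmask, e.g. 0x3 -> 2. */
static unsigned int chains_from_mask(unsigned int mask)
{
	return (unsigned int)__builtin_popcount(mask);
}

int main(void)
{
	unsigned int tx_ant = 0x3, rx_ant = 0x7;	/* example masks only */

	printf("tx chains: %u, rx chains: %u\n",
	       chains_from_mask(tx_ant), chains_from_mask(rx_ant));
	return 0;
}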
struct ieee80211_ops ath9k_htc_ops = {
.tx = ath9k_htc_tx,
.start = ath9k_htc_start,
@@ -1786,4 +1836,11 @@ struct ieee80211_ops ath9k_htc_ops = {
.set_coverage_class = ath9k_htc_set_coverage_class,
.set_bitrate_mask = ath9k_htc_set_bitrate_mask,
.get_stats = ath9k_htc_get_stats,
+ .get_antenna = ath9k_htc_get_antenna,
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ .get_et_sset_count = ath9k_htc_get_et_sset_count,
+ .get_et_stats = ath9k_htc_get_et_stats,
+ .get_et_strings = ath9k_htc_get_et_strings,
+#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 6bd0e92ea2aa..e602c9519709 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -887,7 +887,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
if (priv->rxfilter & FIF_PSPOLL)
rfilt |= ATH9K_RX_FILTER_PSPOLL;
- if (priv->nvifs > 1)
+ if (priv->nvifs > 1 || priv->rxfilter & FIF_OTHER_BSS)
rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
return rfilt;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 15dfefcf2d0f..4ca0cb060106 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -452,7 +452,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
- ah->config.enable_ani = true;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -549,8 +548,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
- if (ah->config.enable_ani)
- ath9k_hw_ani_init(ah);
+ ath9k_hw_ani_init(ah);
return 0;
}
@@ -1250,10 +1248,10 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
switch (opmode) {
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_MESH_POINT:
set |= AR_STA_ID1_ADHOC;
REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
+ case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
set |= AR_STA_ID1_STA_AP;
/* fall through */
@@ -1872,7 +1870,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->caldata = caldata;
if (caldata && (chan->channel != caldata->channel ||
- chan->channelFlags != caldata->channelFlags)) {
+ chan->channelFlags != caldata->channelFlags ||
+ chan->chanmode != caldata->chanmode)) {
/* Operating channel changed, reset channel calibration data */
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
@@ -2255,12 +2254,12 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
switch (ah->opmode) {
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_MESH_POINT:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
flags |= AR_NDP_TIMER_EN;
+ case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
@@ -2600,17 +2599,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
pCap->hw_caps |= ATH9K_HW_CAP_MCI;
- if (AR_SREV_9462_20(ah))
+ if (AR_SREV_9462_20_OR_LATER(ah))
pCap->hw_caps |= ATH9K_HW_CAP_RTT;
}
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE |
- ATH9K_HW_WOW_PATTERN_MATCH_EXACT;
-
- if (AR_SREV_9280(ah))
- pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD;
- }
+ if (AR_SREV_9462(ah))
+ pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE;
if (AR_SREV_9300_20_OR_LATER(ah) &&
ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
@@ -3048,7 +3042,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
timer_next = tsf + trig_timeout;
- ath_dbg(ath9k_hw_common(ah), HWTIMER,
+ ath_dbg(ath9k_hw_common(ah), BTCOEX,
"current tsf %x period %x timer_next %x\n",
tsf, timer_period, timer_next);
@@ -3147,7 +3141,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &thresh_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n",
+ ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n",
index);
timer->overflow(timer->arg);
}
@@ -3156,7 +3150,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &trigger_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_dbg(common, HWTIMER,
+ ath_dbg(common, BTCOEX,
"Gen timer[%d] trigger\n", index);
timer->trigger(timer->arg);
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index ae3034374bc4..cd74b3afef7d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -246,9 +246,7 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_MCI = BIT(15),
ATH9K_HW_CAP_DFS = BIT(16),
ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
- ATH9K_HW_WOW_PATTERN_MATCH_EXACT = BIT(18),
- ATH9K_HW_WOW_PATTERN_MATCH_DWORD = BIT(19),
- ATH9K_HW_CAP_PAPRD = BIT(20),
+ ATH9K_HW_CAP_PAPRD = BIT(18),
};
/*
@@ -291,7 +289,6 @@ struct ath9k_ops_config {
u32 ofdm_trig_high;
u32 cck_trig_high;
u32 cck_trig_low;
- u32 enable_ani;
u32 enable_paprd;
int serialize_regmode;
bool rx_intr_mitigation;
@@ -310,6 +307,10 @@ struct ath9k_ops_config {
u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
u16 ani_poll_interval; /* ANI poll interval in ms */
+
+ /* Platform specific config */
+ u32 xlna_gpio;
+ bool xatten_margin_cfg;
};
enum ath9k_int {
@@ -423,7 +424,6 @@ struct ath9k_hw_cal_data {
struct ath9k_channel {
struct ieee80211_channel *chan;
- struct ar5416AniState ani;
u16 channel;
u32 channelFlags;
u32 chanmode;
@@ -854,10 +854,10 @@ struct ath_hw {
u32 globaltxtimeout;
/* ANI */
- u32 proc_phyerr;
u32 aniperiod;
enum ath9k_ani_cmd ani_function;
u32 ani_skip_count;
+ struct ar5416AniState ani;
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex_hw btcoex_hw;
@@ -882,9 +882,6 @@ struct ath_hw {
struct ar5416IniArray iniBank6;
struct ar5416IniArray iniAddac;
struct ar5416IniArray iniPcieSerdes;
-#ifdef CONFIG_PM_SLEEP
- struct ar5416IniArray iniPcieSerdesWow;
-#endif
struct ar5416IniArray iniPcieSerdesLowPower;
struct ar5416IniArray iniModesFastClock;
struct ar5416IniArray iniAdditional;
@@ -895,6 +892,9 @@ struct ath_hw {
struct ar5416IniArray iniCckfirJapan2484;
struct ar5416IniArray iniModes_9271_ANI_reg;
struct ar5416IniArray ini_radio_post_sys2ant;
+ struct ar5416IniArray ini_modes_rxgain_5g_xlna;
+ struct ar5416IniArray ini_modes_rxgain_bb_core;
+ struct ar5416IniArray ini_modes_rxgain_bb_postamble;
struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
@@ -1165,8 +1165,6 @@ static inline void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
}
#endif
-
-
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 2ba494567777..16f8b201642b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -21,6 +21,7 @@
#include <linux/ath9k_platform.h>
#include <linux/module.h>
#include <linux/relay.h>
+#include <net/ieee80211_radiotap.h>
#include "ath9k.h"
@@ -431,6 +432,8 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
+ sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
+
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
sc->tx.txq_map[i]->mac80211_qnum = i;
@@ -510,6 +513,27 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->spec_config.fft_period = 0xF;
}
+static void ath9k_init_platform(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (common->bus_ops->ath_bus_type != ATH_PCI)
+ return;
+
+ if (sc->driver_data & (ATH9K_PCI_CUS198 |
+ ATH9K_PCI_CUS230)) {
+ ah->config.xlna_gpio = 9;
+ ah->config.xatten_margin_cfg = true;
+
+ ath_info(common, "Set parameters for %s\n",
+ (sc->driver_data & ATH9K_PCI_CUS198) ?
+ "CUS198" : "CUS230");
+ } else if (sc->driver_data & ATH9K_PCI_CUS217) {
+ ath_info(common, "CUS217 card detected\n");
+ }
+}
+
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
void *ctx)
{
@@ -602,6 +626,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
common->disable_ani = false;
/*
+ * Platform quirks.
+ */
+ ath9k_init_platform(sc);
+
+ /*
* Enable Antenna diversity only when BTCOEX is disabled
* and the user manually requests the feature.
*/
@@ -613,9 +642,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
mutex_init(&sc->mutex);
-#ifdef CONFIG_ATH9K_MAC_DEBUG
- spin_lock_init(&sc->debug.samp_lock);
-#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
(unsigned long)sc);
@@ -755,6 +781,15 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support ath9k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = MAX_NUM_USER_PATTERN,
+ .pattern_min_len = 1,
+ .pattern_max_len = MAX_PATTERN_SIZE,
+};
+#endif
+
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
@@ -769,12 +804,19 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ hw->radiotap_mcs_details |=
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+ }
if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -794,21 +836,13 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
#ifdef CONFIG_PM_SLEEP
-
if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
- device_can_wakeup(sc->dev)) {
-
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
- WIPHY_WOWLAN_DISCONNECT;
- hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
- hw->wiphy->wowlan.pattern_min_len = 1;
- hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;
-
- }
+ (sc->driver_data & ATH9K_PCI_WOW) &&
+ device_can_wakeup(sc->dev))
+ hw->wiphy->wowlan = &ath9k_wowlan_support;
atomic_set(&sc->wow_sleep_proc_intr, -1);
atomic_set(&sc->wow_got_bmiss_intr, -1);
-
#endif
hw->queues = 4;
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 849259b07370..fff5d3ccc663 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -390,9 +390,7 @@ void ath_ani_calibrate(unsigned long data)
}
/* Verify whether we must check ANI */
- if (sc->sc_ah->config.enable_ani
- && (timestamp - common->ani.checkani_timer) >=
- ah->config.ani_poll_interval) {
+ if ((timestamp - common->ani.checkani_timer) >= ah->config.ani_poll_interval) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -418,7 +416,6 @@ void ath_ani_calibrate(unsigned long data)
longcal ? "long" : "", shortcal ? "short" : "",
aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
- ath9k_debug_samp_bb_mac(sc);
ath9k_ps_restore(sc);
set_timer:
@@ -428,9 +425,7 @@ set_timer:
* short calibration and long calibration.
*/
cal_interval = ATH_LONG_CALINTERVAL;
- if (sc->sc_ah->config.enable_ani)
- cal_interval = min(cal_interval,
- (u32)ah->config.ani_poll_interval);
+ cal_interval = min(cal_interval, (u32)ah->config.ani_poll_interval);
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 566109a40fb3..2ef05ebffbcf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -547,6 +547,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_status = 0;
rs->rs_flags = 0;
+ rs->flag = 0;
rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
rs->rs_tstamp = ads.AR_RcvTimestamp;
@@ -586,10 +587,17 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_moreaggr =
(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
- rs->rs_flags =
- (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
- rs->rs_flags |=
- (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
+
+ /* directly mapped flags for ieee80211_rx_status */
+ rs->flag |=
+ (ads.ds_rxstatus3 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
+ rs->flag |=
+ (ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0;
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ rs->flag |=
+ (ads.ds_rxstatus3 & AR_STBC) ?
+				/* we can only report Nss=1 STBC */
+ (1 << RX_FLAG_STBC_SHIFT) : 0;
if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
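The hunk above stops translating the descriptor GI/2040 bits into private ATH9K_RX_* flags and stores mac80211 rx flags directly; the AR_STBC bit is reported as one STBC stream. mac80211 keeps the STBC stream count in a small bit-field of the rx flag word starting at RX_FLAG_STBC_SHIFT, which is the encoding the sketch below illustrates (the helper name is invented, not driver code):

/* Illustrative helper: encode an STBC stream count into a mac80211 rx flag
 * word. The field is two bits wide; ath9k only ever reports one stream
 * (value 1), as in the hunk above.
 */
static inline unsigned int stbc_nss_to_rx_flag(unsigned int nss)
{
	return (nss & 0x3) << RX_FLAG_STBC_SHIFT;
}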
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 5865f92998e1..b02dfce964b4 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -149,6 +149,7 @@ struct ath_rx_status {
u32 evm2;
u32 evm3;
u32 evm4;
+ u32 flag; /* see enum mac80211_rx_flags */
};
struct ath_htc_rx_status {
@@ -533,7 +534,8 @@ struct ar5416_desc {
#define AR_2040 0x00000002
#define AR_Parallel40 0x00000004
#define AR_Parallel40_S 2
-#define AR_RxStatusRsvd30 0x000000f8
+#define AR_STBC 0x00000008 /* on ar9280 and later */
+#define AR_RxStatusRsvd30 0x000000f0
#define AR_RxAntenna 0xffffff00
#define AR_RxAntenna_S 8
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5092ecae7706..1737a3e33685 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -193,7 +193,6 @@ static bool ath_prepare_reset(struct ath_softc *sc)
ath_stop_ani(sc);
del_timer_sync(&sc->rx_poll_timer);
- ath9k_debug_samp_bb_mac(sc);
ath9k_hw_disable_interrupts(ah);
if (!ath_drain_all_txq(sc))
@@ -1211,13 +1210,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_update_survey_stats(sc);
spin_unlock_irqrestore(&common->cc_lock, flags);
- /*
- * Preserve the current channel values, before updating
- * the same channel
- */
- if (ah->curchan && (old_pos == pos))
- ath9k_hw_getnf(ah, ah->curchan);
-
ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
curchan, channel_type);
@@ -1273,7 +1265,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
curchan->center_freq);
} else {
/* perform spectral scan if requested. */
- if (sc->scanning &&
+ if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
sc->spectral_mode == SPECTRAL_CHANSCAN)
ath9k_spectral_scan_trigger(hw);
}
@@ -1690,7 +1682,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
bool flush = false;
int ret = 0;
- local_bh_disable();
+ mutex_lock(&sc->mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -1723,7 +1715,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
}
- local_bh_enable();
+ mutex_unlock(&sc->mutex);
return ret;
}
@@ -2007,7 +1999,6 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_capabilities *pcaps = &ah->caps;
int pattern_count = 0;
int i, byte_cnt;
u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
@@ -2077,36 +2068,9 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
/* Create Disassociate pattern mask */
- if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_EXACT) {
-
- if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_DWORD) {
- /*
- * for AR9280, because of hardware limitation, the
- * first 4 bytes have to be matched for all patterns.
- * the mask for disassociation and de-auth pattern
- * matching need to enable the first 4 bytes.
- * also the duration field needs to be filled.
- */
- dis_deauth_mask[0] = 0xf0;
-
- /*
- * fill in duration field
- FIXME: what is the exact value ?
- */
- dis_deauth_pattern[2] = 0xff;
- dis_deauth_pattern[3] = 0xff;
- } else {
- dis_deauth_mask[0] = 0xfe;
- }
-
- dis_deauth_mask[1] = 0x03;
- dis_deauth_mask[2] = 0xc0;
- } else {
- dis_deauth_mask[0] = 0xef;
- dis_deauth_mask[1] = 0x3f;
- dis_deauth_mask[2] = 0x00;
- dis_deauth_mask[3] = 0xfc;
- }
+ dis_deauth_mask[0] = 0xfe;
+ dis_deauth_mask[1] = 0x03;
+ dis_deauth_mask[2] = 0xc0;
ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
@@ -2342,15 +2306,13 @@ static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
-
- sc->scanning = 1;
+ set_bit(SC_OP_SCANNING, &sc->sc_flags);
}
static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
-
- sc->scanning = 0;
+ clear_bit(SC_OP_SCANNING, &sc->sc_flags);
}
struct ieee80211_ops ath9k_ops = {
@@ -2378,6 +2340,7 @@ struct ieee80211_ops ath9k_ops = {
.flush = ath9k_flush,
.tx_frames_pending = ath9k_tx_frames_pending,
.tx_last_beacon = ath9k_tx_last_beacon,
+ .release_buffered_frames = ath9k_release_buffered_frames,
.get_stats = ath9k_get_stats,
.set_antenna = ath9k_set_antenna,
.get_antenna = ath9k_get_antenna,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 0e0d39583837..c585c9b35973 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -34,8 +34,108 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
+
+ /* PCI-E CUS198 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2086),
+ .driver_data = ATH9K_PCI_CUS198 },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1237),
+ .driver_data = ATH9K_PCI_CUS198 },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2126),
+ .driver_data = ATH9K_PCI_CUS198 },
+
+ /* PCI-E CUS230 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2152),
+ .driver_data = ATH9K_PCI_CUS230 },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE075),
+ .driver_data = ATH9K_PCI_CUS230 },
+
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
+
+ /* PCI-E CUS217 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2116),
+ .driver_data = ATH9K_PCI_CUS217 },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x11AD, /* LITEON */
+ 0x6661),
+ .driver_data = ATH9K_PCI_CUS217 },
+
+ /* AR9462 with WoW support */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3117),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3214),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_ATTANSIC,
+ 0x0091),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2110),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x850E),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x11AD, /* LITEON */
+ 0x6631),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x11AD, /* LITEON */
+ 0x6641),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_HP,
+ 0x1864),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x14CD, /* USI */
+ 0x0063),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x14CD, /* USI */
+ 0x0064),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x10CF, /* Fujitsu */
+ 0x1783),
+ .driver_data = ATH9K_PCI_WOW },
+
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
@@ -221,6 +321,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->hw = hw;
sc->dev = &pdev->dev;
sc->mem = pcim_iomap_table(pdev)[0];
+ sc->driver_data = id->driver_data;
/* Will be cleared in ath9k_start() */
set_bit(SC_OP_INVALID, &sc->sc_flags);
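The new table entries attach a quirk bitmap to each subsystem ID through .driver_data, and the probe hunk above copies id->driver_data into sc->driver_data so ath9k_init_platform() and the WoW capability check can test it later. A generic sketch of that standard PCI pattern, with made-up IDs and flag names:

/* Illustrative sketch of the pci_device_id.driver_data quirk pattern. */
#define MY_QUIRK_FOO	BIT(0)

static const struct pci_device_id my_ids[] = {
	{ PCI_DEVICE_SUB(0x1234, 0x5678, 0x9abc, 0xdef0),	/* fake IDs */
	  .driver_data = MY_QUIRK_FOO },
	{ }	/* terminating entry */
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned long quirks = id->driver_data;	/* from the matched entry */

	if (quirks & MY_QUIRK_FOO) {
		/* apply the platform-specific tuning here */
	}
	return 0;
}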
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 8be2b5d8c155..865e043e8aa6 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -868,10 +868,7 @@ static int ath9k_process_rate(struct ath_common *common,
if (rx_stats->rs_rate & 0x80) {
/* HT rate */
rxs->flag |= RX_FLAG_HT;
- if (rx_stats->rs_flags & ATH9K_RX_2040)
- rxs->flag |= RX_FLAG_40MHZ;
- if (rx_stats->rs_flags & ATH9K_RX_GI)
- rxs->flag |= RX_FLAG_SHORT_GI;
+ rxs->flag |= rx_stats->flag;
rxs->rate_idx = rx_stats->rs_rate & 0x7f;
return 0;
}
@@ -958,11 +955,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
if (rx_stats->rs_more)
return 0;
- ath9k_process_rssi(common, hw, hdr, rx_stats);
-
if (ath9k_process_rate(common, hw, rx_stats, rx_status))
return -EINVAL;
+ ath9k_process_rssi(common, hw, hdr, rx_stats);
+
rx_status->band = hw->conf.chandef.chan->band;
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->signal = ah->noise + rx_stats->rs_rssi;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index f7c90cc58d56..5af97442ac37 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -806,6 +806,7 @@
#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
#define AR_SREV_VERSION_9462 0x280
#define AR_SREV_REVISION_9462_20 2
+#define AR_SREV_REVISION_9462_21 3
#define AR_SREV_VERSION_9565 0x2C0
#define AR_SREV_REVISION_9565_10 0
#define AR_SREV_VERSION_9550 0x400
@@ -911,10 +912,18 @@
#define AR_SREV_9462(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
-
#define AR_SREV_9462_20(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
- ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
+#define AR_SREV_9462_21(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_21))
+#define AR_SREV_9462_20_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
+#define AR_SREV_9462_21_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_21))
#define AR_SREV_9565(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
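The new _OR_LATER macros rely on macRev increasing monotonically within the AR9462 family (revision 2 is 2.0, revision 3 is the new 2.1), so a >= comparison covers both steppings where the old exact match selected only 2.0. A trivial stand-alone check of that ordering, using the same revision values:

#include <stdio.h>

#define REV_9462_20	2	/* mirrors AR_SREV_REVISION_9462_20 */
#define REV_9462_21	3	/* mirrors AR_SREV_REVISION_9462_21 */

int main(void)
{
	int macRev = REV_9462_21;	/* a 2.1 part */

	/* the exact 2.0 check fails, the new >= check also covers 2.1 */
	printf("is 2.0: %d, is 2.0 or later: %d\n",
	       macRev == REV_9462_20, macRev >= REV_9462_20);
	return 0;
}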
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 9f8563091bea..81c88dd606dc 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -34,17 +34,6 @@ const char *ath9k_hw_wow_event_to_string(u32 wow_event)
}
EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
-static void ath9k_hw_config_serdes_wow_sleep(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < ah->iniPcieSerdesWow.ia_rows; i++)
- REG_WRITE(ah, INI_RA(&ah->iniPcieSerdesWow, i, 0),
- INI_RA(&ah->iniPcieSerdesWow, i, 1));
-
- usleep_range(1000, 1500);
-}
-
static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -58,15 +47,8 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
return;
- } else {
- if (!AR_SREV_9300_20_OR_LATER(ah))
- REG_WRITE(ah, AR_RXDP, 0x0);
}
- /* AR9280 WoW has sleep issue, do not set it to sleep */
- if (AR_SREV_9280_20(ah))
- return;
-
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
}
@@ -84,27 +66,16 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
/* set the transmit buffer */
ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
-
- if (!(AR_SREV_9300_20_OR_LATER(ah)))
- ctl[0] += (KAL_ANTENNA_MODE << 25);
-
ctl[1] = 0;
ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
ctl[4] = 0;
ctl[7] = (ah->txchainmask) << 2;
-
- if (AR_SREV_9300_20_OR_LATER(ah))
- ctl[2] = 0xf << 16; /* tx_tries 0 */
- else
- ctl[2] = 0x7 << 16; /* tx_tries 0 */
-
+ ctl[2] = 0xf << 16; /* tx_tries 0 */
for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
- /* for AR9300 family 13 descriptor words */
- if (AR_SREV_9300_20_OR_LATER(ah))
- REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
(KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
@@ -183,9 +154,6 @@ void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
- if (!AR_SREV_9285_12_OR_LATER(ah))
- return;
-
if (pattern_count < 4) {
/* Pattern 0-3 uses AR_WOW_LENGTH1 register */
set = (pattern_len & AR_WOW_LENGTH_MAX) <<
@@ -207,6 +175,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
{
u32 wow_status = 0;
u32 val = 0, rval;
+
/*
* read the WoW status register to know
* the wakeup reason
@@ -223,19 +192,14 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
val &= ah->wow_event_mask;
if (val) {
-
if (val & AR_WOW_MAGIC_PAT_FOUND)
wow_status |= AH_WOW_MAGIC_PATTERN_EN;
-
if (AR_WOW_PATTERN_FOUND(val))
wow_status |= AH_WOW_USER_PATTERN_EN;
-
if (val & AR_WOW_KEEP_ALIVE_FAIL)
wow_status |= AH_WOW_LINK_CHANGE;
-
if (val & AR_WOW_BEACON_FAIL)
wow_status |= AH_WOW_BEACON_MISS;
-
}
/*
@@ -255,17 +219,6 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
/*
- * tie reset register for AR9002 family of chipsets
- * NB: not tieing it back might have some repurcussions.
- */
-
- if (!AR_SREV_9300_20_OR_LATER(ah)) {
- REG_SET_BIT(ah, AR_WA, AR_WA_UNTIE_RESET_EN |
- AR_WA_POR_SHORT | AR_WA_RESET_EN);
- }
-
-
- /*
* restore the beacon threshold to init value
*/
REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
@@ -277,8 +230,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
* reset to our Chip's Power On Reset so that any PCI-E
* reset from the bus will not reset our chip
*/
-
- if (AR_SREV_9280_20_OR_LATER(ah) && ah->is_pciexpress)
+ if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, false);
ah->wow_event_mask = 0;
@@ -298,7 +250,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
* are from the 'pattern_enable' in this function and
* 'pattern_count' of ath9k_hw_wow_apply_pattern()
*/
-
wow_event_mask = ah->wow_event_mask;
/*
@@ -306,50 +257,15 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
* WOW sleep, we do want the Reset from the PCI-E to disturb
* our hw state
*/
-
if (ah->is_pciexpress) {
-
/*
* we need to untie the internal POR (power-on-reset)
* to the external PCI-E reset. We also need to tie
* the PCI-E Phy reset to the PCI-E reset.
*/
-
- if (AR_SREV_9300_20_OR_LATER(ah)) {
- set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
- clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
- REG_RMW(ah, AR_WA, set, clr);
- } else {
- if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
- set = AR9285_WA_DEFAULT;
- else
- set = AR9280_WA_DEFAULT;
-
- /*
- * In AR9280 and AR9285, bit 14 in WA register
- * (disable L1) should only be set when device
- * enters D3 state and be cleared when device
- * comes back to D0
- */
-
- if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
- set |= AR_WA_D3_L1_DISABLE;
-
- clr = AR_WA_UNTIE_RESET_EN;
- set |= AR_WA_RESET_EN | AR_WA_POR_SHORT;
- REG_RMW(ah, AR_WA, set, clr);
-
- /*
- * for WoW sleep, we reprogram the SerDes so that the
- * PLL and CLK REQ are both enabled. This uses more
- * power but otherwise WoW sleep is unstable and the
- * chip may disappear.
- */
-
- if (AR_SREV_9285_12_OR_LATER(ah))
- ath9k_hw_config_serdes_wow_sleep(ah);
-
- }
+ set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
+ clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
+ REG_RMW(ah, AR_WA, set, clr);
}
/*
@@ -378,7 +294,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
* Program default values for pattern backoff, aifs/slot/KAL count,
* beacon miss timeout, KAL timeout, etc.
*/
-
set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
REG_SET_BIT(ah, AR_WOW_PATTERN, set);
@@ -398,7 +313,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
/*
* Keep alive timo in ms except AR9280
*/
- if (!pattern_enable || AR_SREV_9280(ah))
+ if (!pattern_enable)
set = AR_WOW_KEEP_ALIVE_NEVER;
else
set = KAL_TIMEOUT * 32;
@@ -420,7 +335,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
/*
* Configure MAC WoW Registers
*/
-
set = 0;
/* Send keep alive timeouts anyway */
clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
@@ -430,16 +344,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
else
set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
- /*
- * FIXME: For now disable keep alive frame
- * failure. This seems to sometimes trigger
- * unnecessary wake up with AR9485 chipsets.
- */
set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
-
REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
-
/*
* we are relying on a bmiss failure. ensure we have
* enough threshold to prevent false positives
@@ -473,14 +380,8 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
set |= AR_WOW_MAC_INTR_EN;
REG_RMW(ah, AR_WOW_PATTERN, set, clr);
- /*
- * For AR9285 and later version of chipsets
- * enable WoW pattern match for packets less
- * than 256 bytes for all patterns
- */
- if (AR_SREV_9285_12_OR_LATER(ah))
- REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
- AR_WOW_PATTERN_SUPPORTED);
+ REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
+ AR_WOW_PATTERN_SUPPORTED);
/*
* Set the power states appropriately and enable PME
@@ -488,43 +389,32 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
clr = 0;
set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
AR_PMCTRL_PWR_PM_CTRL_ENA;
- /*
- * This is needed for AR9300 chipsets to wake-up
- * the host.
- */
- if (AR_SREV_9300_20_OR_LATER(ah))
- clr = AR_PCIE_PM_CTRL_ENA;
+ clr = AR_PCIE_PM_CTRL_ENA;
REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
- /*
- * this is needed to prevent the chip waking up
- * the host within 3-4 seconds with certain
- * platform/BIOS. The fix is to enable
- * D1 & D3 to match original definition and
- * also match the OTP value. Anyway this
- * is more related to SW WOW.
- */
- clr = AR_PMCTRL_PWR_STATE_D1D3;
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
-
- set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
- }
-
+ /*
+ * this is needed to prevent the chip waking up
+ * the host within 3-4 seconds with certain
+ * platform/BIOS. The fix is to enable
+ * D1 & D3 to match original definition and
+ * also match the OTP value. Anyway this
+ * is more related to SW WOW.
+ */
+ clr = AR_PMCTRL_PWR_STATE_D1D3;
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+ set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
- if (AR_SREV_9300_20_OR_LATER(ah)) {
- /* to bring down WOW power low margin */
- set = BIT(13);
- REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
- /* HW WoW */
- clr = BIT(5);
- REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
- }
+ /* to bring down WOW power low margin */
+ set = BIT(13);
+ REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
+ /* HW WoW */
+ clr = BIT(5);
+ REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
ath9k_hw_set_powermode_wow_sleep(ah);
ah->wow_event_mask = wow_event_mask;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 83ab6be3fe6d..927992732620 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -146,6 +146,28 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
ARRAY_SIZE(bf->rates));
}
+static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
+ struct sk_buff *skb)
+{
+ int q;
+
+ q = skb_get_queue_mapping(skb);
+ if (txq == sc->tx.uapsdq)
+ txq = sc->tx.txq_map[q];
+
+ if (txq != sc->tx.txq_map[q])
+ return;
+
+ if (WARN_ON(--txq->pending_frames < 0))
+ txq->pending_frames = 0;
+
+ if (txq->stopped &&
+ txq->pending_frames < sc->tx.txq_max_pending[q]) {
+ ieee80211_wake_queue(sc->hw, q);
+ txq->stopped = false;
+ }
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
@@ -167,6 +189,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
if (!bf) {
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
+ ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
@@ -518,6 +541,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
!txfail);
} else {
+ if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
+ tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+ ieee80211_sta_eosp(sta);
+ }
/* retry the un-acked ones */
if (bf->bf_next == NULL && bf_last->bf_stale) {
struct ath_buf *tbf;
@@ -786,25 +813,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
return ndelim;
}
-static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
- struct ath_txq *txq,
- struct ath_atx_tid *tid,
- struct list_head *bf_q,
- int *aggr_len)
+static struct ath_buf *
+ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid)
{
-#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
- struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
- int rl = 0, nframes = 0, ndelim, prev_al = 0;
- u16 aggr_limit = 0, al = 0, bpad = 0,
- al_delta, h_baw = tid->baw_size / 2;
- enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
- struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
+ struct ath_buf *bf;
u16 seqno;
- do {
+ while (1) {
skb = skb_peek(&tid->buf_q);
+ if (!skb)
+ break;
+
fi = get_frame_info(skb);
bf = fi->bf;
if (!fi->bf)
@@ -812,6 +834,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
if (!bf) {
__skb_unlink(skb, &tid->buf_q);
+ ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
@@ -820,10 +843,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
seqno = bf->bf_state.seqno;
/* do not step over block-ack window */
- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
- status = ATH_AGGR_BAW_CLOSED;
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
break;
- }
if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
struct ath_tx_status ts = {};
@@ -837,6 +858,40 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
continue;
}
+ bf->bf_next = NULL;
+ bf->bf_lastbf = bf;
+ return bf;
+ }
+
+ return NULL;
+}
+
+static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ struct list_head *bf_q,
+ int *aggr_len)
+{
+#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
+ struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
+ int rl = 0, nframes = 0, ndelim, prev_al = 0;
+ u16 aggr_limit = 0, al = 0, bpad = 0,
+ al_delta, h_baw = tid->baw_size / 2;
+ enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+ struct ieee80211_tx_info *tx_info;
+ struct ath_frame_info *fi;
+ struct sk_buff *skb;
+
+ do {
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
+ if (!bf) {
+ status = ATH_AGGR_BAW_CLOSED;
+ break;
+ }
+
+ skb = bf->bf_mpdu;
+ fi = get_frame_info(skb);
+
if (!bf_first)
bf_first = bf;
@@ -882,7 +937,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
/* link buffers of this frame to the aggregate */
if (!fi->retries)
- ath_tx_addto_baw(sc, tid, seqno);
+ ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
bf->bf_state.ndelim = ndelim;
__skb_unlink(skb, &tid->buf_q);
@@ -1090,10 +1145,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, int len)
{
struct ath_hw *ah = sc->sc_ah;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
- struct ath_buf *bf_first = bf;
+ struct ath_buf *bf_first = NULL;
struct ath_tx_info info;
- bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
memset(&info, 0, sizeof(info));
info.is_first = true;
@@ -1101,24 +1154,11 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
info.txpower = MAX_RATE_POWER;
info.qcu = txq->axq_qnum;
- info.flags = ATH9K_TXDESC_INTREQ;
- if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
- info.flags |= ATH9K_TXDESC_NOACK;
- if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
- info.flags |= ATH9K_TXDESC_LDPC;
-
- ath_buf_set_rate(sc, bf, &info, len);
-
- if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
- info.flags |= ATH9K_TXDESC_CLRDMASK;
-
- if (bf->bf_state.bfs_paprd)
- info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;
-
-
while (bf) {
struct sk_buff *skb = bf->bf_mpdu;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_frame_info *fi = get_frame_info(skb);
+ bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
info.type = get_hw_packet_type(skb);
if (bf->bf_next)
@@ -1126,6 +1166,26 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
else
info.link = 0;
+ if (!bf_first) {
+ bf_first = bf;
+
+ info.flags = ATH9K_TXDESC_INTREQ;
+ if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
+ txq == sc->tx.uapsdq)
+ info.flags |= ATH9K_TXDESC_CLRDMASK;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info.flags |= ATH9K_TXDESC_NOACK;
+ if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
+ info.flags |= ATH9K_TXDESC_LDPC;
+
+ if (bf->bf_state.bfs_paprd)
+ info.flags |= (u32) bf->bf_state.bfs_paprd <<
+ ATH9K_TXDESC_PAPRD_S;
+
+ ath_buf_set_rate(sc, bf, &info, len);
+ }
+
info.buf_addr[0] = bf->bf_buf_addr;
info.buf_len[0] = skb->len;
info.pkt_len = fi->framelen;
@@ -1135,7 +1195,7 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
if (aggr) {
if (bf == bf_first)
info.aggr = AGGR_BUF_FIRST;
- else if (!bf->bf_next)
+ else if (bf == bf_first->bf_lastbf)
info.aggr = AGGR_BUF_LAST;
else
info.aggr = AGGR_BUF_MIDDLE;
@@ -1144,6 +1204,9 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
info.aggr_len = len;
}
+ if (bf == bf_first->bf_lastbf)
+ bf_first = NULL;
+
ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
bf = bf->bf_next;
}
@@ -1328,6 +1391,70 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
ath_txq_unlock_complete(sc, txq);
}
+void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ struct ath_txq *txq = sc->tx.uapsdq;
+ struct ieee80211_tx_info *info;
+ struct list_head bf_q;
+ struct ath_buf *bf_tail = NULL, *bf;
+ int sent = 0;
+ int i;
+
+ INIT_LIST_HEAD(&bf_q);
+ for (i = 0; tids && nframes; i++, tids >>= 1) {
+ struct ath_atx_tid *tid;
+
+ if (!(tids & 1))
+ continue;
+
+ tid = ATH_AN_2_TID(an, i);
+ if (tid->paused)
+ continue;
+
+ ath_txq_lock(sc, tid->ac->txq);
+ while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
+ bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
+ if (!bf)
+ break;
+
+ __skb_unlink(bf->bf_mpdu, &tid->buf_q);
+ list_add_tail(&bf->list, &bf_q);
+ ath_set_rates(tid->an->vif, tid->an->sta, bf);
+ ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
+ bf->bf_state.bf_type &= ~BUF_AGGR;
+ if (bf_tail)
+ bf_tail->bf_next = bf;
+
+ bf_tail = bf;
+ nframes--;
+ sent++;
+ TX_STAT_INC(txq->axq_qnum, a_queued_hw);
+
+ if (skb_queue_empty(&tid->buf_q))
+ ieee80211_sta_set_buffered(an->sta, i, false);
+ }
+ ath_txq_unlock_complete(sc, tid->ac->txq);
+ }
+
+ if (list_empty(&bf_q))
+ return;
+
+ info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
+ info->flags |= IEEE80211_TX_STATUS_EOSP;
+
+ bf = list_first_entry(&bf_q, struct ath_buf, list);
+ ath_txq_lock(sc, txq);
+ ath_tx_fill_desc(sc, bf, txq, 0);
+ ath_tx_txqaddbuf(sc, txq, &bf_q, false);
+ ath_txq_unlock(sc, txq);
+}
+
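ath9k_release_buffered_frames() implements mac80211's U-APSD service-period contract: walk the TID bitmap, release at most nframes buffered frames onto the U-APSD queue, and flag the last one with IEEE80211_TX_STATUS_EOSP so ieee80211_sta_eosp() can be called from TX status (see the ath_tx_complete_aggr() hunk earlier). A condensed, hypothetical skeleton of that contract; my_dequeue_buffered() and my_queue_for_tx() are placeholders, not driver functions:

/* Hypothetical skeleton of the release_buffered_frames() contract. */
static void my_release_buffered_frames(struct ieee80211_hw *hw,
				       struct ieee80211_sta *sta,
				       u16 tids, int nframes,
				       enum ieee80211_frame_release_type reason,
				       bool more_data)
{
	struct sk_buff *skb, *last = NULL;
	int tid;

	for (tid = 0; tids && nframes > 0; tid++, tids >>= 1) {
		if (!(tids & 1))
			continue;

		while (nframes > 0 &&
		       (skb = my_dequeue_buffered(sta, tid)) != NULL) {
			last = skb;
			my_queue_for_tx(hw, skb);	/* driver-specific */
			nframes--;
		}
	}

	/* The last released frame closes the service period; the driver then
	 * calls ieee80211_sta_eosp() when that frame's TX status comes back. */
	if (last)
		IEEE80211_SKB_CB(last)->flags |= IEEE80211_TX_STATUS_EOSP;
}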
/********************/
/* Queue Management */
/********************/
@@ -1679,14 +1806,19 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
if (!internal) {
- txq->axq_depth++;
- if (bf_is_ampdu_not_probing(bf))
- txq->axq_ampdu_depth++;
+ while (bf) {
+ txq->axq_depth++;
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth++;
+
+ bf = bf->bf_lastbf->bf_next;
+ }
}
}
-static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct sk_buff *skb, struct ath_tx_control *txctl)
+static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
{
struct ath_frame_info *fi = get_frame_info(skb);
struct list_head bf_head;
@@ -1699,22 +1831,24 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
* - seqno is not within block-ack window
* - h/w queue depth exceeds low water mark
*/
- if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
- !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
- txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
+ if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
+ !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
+ txq != sc->tx.uapsdq) {
/*
* Add this frame to software queue for scheduling later
* for aggregation.
*/
- TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
+ TX_STAT_INC(txq->axq_qnum, a_queued_sw);
__skb_queue_tail(&tid->buf_q, skb);
if (!txctl->an || !txctl->an->sleeping)
- ath_tx_queue_tid(txctl->txq, tid);
+ ath_tx_queue_tid(txq, tid);
return;
}
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
+ ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
return;
}
@@ -1728,10 +1862,10 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
/* Queue to h/w without aggregation */
- TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
+ TX_STAT_INC(txq->axq_qnum, a_queued_hw);
bf->bf_lastbf = bf;
- ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
- ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
+ ath_tx_fill_desc(sc, bf, txq, fi->framelen);
+ ath_tx_txqaddbuf(sc, txq, &bf_head, false);
}
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
@@ -1869,22 +2003,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
return bf;
}
-/* Upon failure caller should free skb */
-int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ath_tx_control *txctl)
+static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
struct ath_softc *sc = hw->priv;
- struct ath_txq *txq = txctl->txq;
- struct ath_atx_tid *tid = NULL;
- struct ath_buf *bf;
- int padpos, padsize;
int frmlen = skb->len + FCS_LEN;
- u8 tidno;
- int q;
+ int padpos, padsize;
/* NOTE: sta can be NULL according to net/mac80211.h */
if (sta)
@@ -1905,6 +2033,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
}
+ if ((vif && vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_AP_VLAN) ||
+ !ieee80211_is_data(hdr->frame_control))
+ info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
/* Add the padding after the header if this is not already done */
padpos = ieee80211_hdrlen(hdr->frame_control);
padsize = padpos & 3;
@@ -1914,16 +2047,34 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, padpos);
- hdr = (struct ieee80211_hdr *) skb->data;
}
- if ((vif && vif->type != NL80211_IFTYPE_AP &&
- vif->type != NL80211_IFTYPE_AP_VLAN) ||
- !ieee80211_is_data(hdr->frame_control))
- info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
-
setup_frame_info(hw, sta, skb, frmlen);
+ return 0;
+}
+
+
+/* Upon failure caller should free skb */
+int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = txctl->sta;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath_softc *sc = hw->priv;
+ struct ath_txq *txq = txctl->txq;
+ struct ath_atx_tid *tid = NULL;
+ struct ath_buf *bf;
+ u8 tidno;
+ int q;
+ int ret;
+
+ ret = ath_tx_prepare(hw, skb, txctl);
+ if (ret)
+ return ret;
+ hdr = (struct ieee80211_hdr *) skb->data;
/*
* At this point, the vif, hw_key and sta pointers in the tx control
* info are no longer valid (overwritten by the ath_frame_info data.
@@ -1939,6 +2090,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
txq->stopped = true;
}
+ if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
+ ath_txq_unlock(sc, txq);
+ txq = sc->tx.uapsdq;
+ ath_txq_lock(sc, txq);
+ }
+
if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
IEEE80211_QOS_CTL_TID_MASK;
@@ -1952,12 +2109,13 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
* Try aggregation if it's a unicast data frame
* and the destination is HT capable.
*/
- ath_tx_send_ampdu(sc, tid, skb, txctl);
+ ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
goto out;
}
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
+ ath_txq_skb_done(sc, txq, skb);
if (txctl->paprd)
dev_kfree_skb_any(skb);
else
@@ -1971,7 +2129,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
bf->bf_state.bfs_paprd_timestamp = jiffies;
ath_set_rates(vif, sta, bf);
- ath_tx_send_normal(sc, txctl->txq, tid, skb);
+ ath_tx_send_normal(sc, txq, tid, skb);
out:
ath_txq_unlock(sc, txq);
@@ -1979,6 +2137,74 @@ out:
return 0;
}
+void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_tx_control txctl = {
+ .txq = sc->beacon.cabq
+ };
+ struct ath_tx_info info = {};
+ struct ieee80211_hdr *hdr;
+ struct ath_buf *bf_tail = NULL;
+ struct ath_buf *bf;
+ LIST_HEAD(bf_q);
+ int duration = 0;
+ int max_duration;
+
+ max_duration =
+ sc->cur_beacon_conf.beacon_interval * 1000 *
+ sc->cur_beacon_conf.dtim_period / ATH_BCBUF;
+
+ do {
+ struct ath_frame_info *fi = get_frame_info(skb);
+
+ if (ath_tx_prepare(hw, skb, &txctl))
+ break;
+
+ bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
+ if (!bf)
+ break;
+
+ bf->bf_lastbf = bf;
+ ath_set_rates(vif, NULL, bf);
+ ath_buf_set_rate(sc, bf, &info, fi->framelen);
+ duration += info.rates[0].PktDuration;
+ if (bf_tail)
+ bf_tail->bf_next = bf;
+
+ list_add_tail(&bf->list, &bf_q);
+ bf_tail = bf;
+ skb = NULL;
+
+ if (duration > max_duration)
+ break;
+
+ skb = ieee80211_get_buffered_bc(hw, vif);
+	} while (skb);
+
+ if (skb)
+ ieee80211_free_txskb(hw, skb);
+
+ if (list_empty(&bf_q))
+ return;
+
+ bf = list_first_entry(&bf_q, struct ath_buf, list);
+ hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
+
+ if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
+ hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ sizeof(*hdr), DMA_TO_DEVICE);
+ }
+
+ ath_txq_lock(sc, txctl.txq);
+ ath_tx_fill_desc(sc, bf, txctl.txq, 0);
+ ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
+ TX_STAT_INC(txctl.txq->axq_qnum, queued);
+ ath_txq_unlock(sc, txctl.txq);
+}
+
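ath_tx_cabq() drains buffered multicast frames until their cumulative PktDuration exceeds a per-beacon budget; max_duration is the DTIM interval spread across the ATH_BCBUF beacon slots, treating one TU as roughly 1000 us. As a worked example under assumed values (beacon_interval = 100 TU, dtim_period = 1, ATH_BCBUF = 8; the real constants may differ): max_duration = 100 * 1000 * 1 / 8 = 12500, i.e. about 12.5 ms of CAB traffic per slot before the loop stops.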
/*****************/
/* TX Completion */
/*****************/
@@ -1989,7 +2215,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
- int q, padpos, padsize;
+ int padpos, padsize;
unsigned long flags;
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
@@ -2024,19 +2250,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- q = skb_get_queue_mapping(skb);
- if (txq == sc->tx.txq_map[q]) {
- if (WARN_ON(--txq->pending_frames < 0))
- txq->pending_frames = 0;
-
- if (txq->stopped &&
- txq->pending_frames < sc->tx.txq_max_pending[q]) {
- ieee80211_wake_queue(sc->hw, q);
- txq->stopped = false;
- }
- }
-
__skb_queue_tail(&txq->complete_q, skb);
+ ath_txq_skb_done(sc, txq, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 9dce106cd6d4..8596aba34f96 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -133,6 +133,9 @@ struct carl9170_sta_tid {
/* Preaggregation reorder queue */
struct sk_buff_head queue;
+
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
};
#define CARL9170_QUEUE_TIMEOUT 256
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index e9010a481dfd..4a33c6e39ca2 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1448,6 +1448,8 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
tid_info->state = CARL9170_TID_STATE_PROGRESS;
tid_info->tid = tid;
tid_info->max = sta_info->ampdu_max_len;
+ tid_info->sta = sta;
+ tid_info->vif = vif;
INIT_LIST_HEAD(&tid_info->list);
INIT_LIST_HEAD(&tid_info->tmp_list);
@@ -1857,6 +1859,7 @@ void *carl9170_alloc(size_t priv_size)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
+ IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SIGNAL_DBM;
if (!modparam_noht) {
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index c61cafa2665b..e3f696ee4d23 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -625,7 +625,7 @@ static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
goto unlock;
- sta = __carl9170_get_tx_sta(ar, skb);
+ sta = iter->sta;
if (WARN_ON(!sta))
goto unlock;
@@ -866,6 +866,93 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
return false;
}
+static void carl9170_tx_get_rates(struct ar9170 *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info;
+
+ BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
+ BUILD_BUG_ON(IEEE80211_TX_MAX_RATES > IEEE80211_TX_RATE_TABLE_SIZE);
+
+ info = IEEE80211_SKB_CB(skb);
+
+ ieee80211_get_tx_rates(vif, sta, skb,
+ info->control.rates,
+ IEEE80211_TX_MAX_RATES);
+}
+
+static void carl9170_tx_apply_rateset(struct ar9170 *ar,
+ struct ieee80211_tx_info *sinfo,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_rate *txrate;
+ struct ieee80211_tx_info *info;
+ struct _carl9170_tx_superframe *txc = (void *) skb->data;
+ int i;
+ bool ampdu;
+ bool no_ack;
+
+ info = IEEE80211_SKB_CB(skb);
+ ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
+ no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
+
+ /* Set the rate control probe flag for all (sub-) frames.
+ * This is because the TX_STATS_AMPDU flag is only set on
+ * the last frame, so it has to be inherited.
+ */
+ info->flags |= (sinfo->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+
+ /* NOTE: For the first rate, the ERP & AMPDU flags are directly
+ * taken from mac_control. For all fallback rate, the firmware
+ * updates the mac_control flags from the rate info field.
+ */
+ for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
+ __le32 phy_set;
+
+ txrate = &sinfo->control.rates[i];
+ if (txrate->idx < 0)
+ break;
+
+ phy_set = carl9170_tx_physet(ar, info, txrate);
+ if (i == 0) {
+ __le16 mac_tmp = cpu_to_le16(0);
+
+ /* first rate - part of the hw's frame header */
+ txc->f.phy_control = phy_set;
+
+ if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+
+ if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
+ else if (carl9170_tx_cts_check(ar, txrate))
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
+
+ txc->f.mac_control |= mac_tmp;
+ } else {
+ /* fallback rates are stored in the firmware's
+ * retry rate set array.
+ */
+ txc->s.rr[i - 1] = phy_set;
+ }
+
+ SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
+ txrate->count);
+
+ if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
+ txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
+ CARL9170_TX_SUPER_RI_ERP_PROT_S);
+ else if (carl9170_tx_cts_check(ar, txrate))
+ txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
+ CARL9170_TX_SUPER_RI_ERP_PROT_S);
+
+ if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
+ txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
+ }
+}
+
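With IEEE80211_HW_SUPPORTS_RC_TABLE set (main.c hunk earlier), the rate control state lives in a per-station table and the driver fetches a fresh rate set at transmit time through ieee80211_get_tx_rates(), which carl9170_tx_get_rates() wraps; carl9170_tx_apply_rateset() then copies that set into the firmware superframe. A minimal, hypothetical sketch of the generic pattern (the function name is made up):

/* Illustrative sketch: pull rates from mac80211's rate table at tx time. */
static void my_tx_fill_rates(struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* Fills info->control.rates[] from the station's rate table
	 * (or from the info itself when no table is in use). */
	ieee80211_get_tx_rates(vif, sta, skb, info->control.rates,
			       IEEE80211_TX_MAX_RATES);

	/* hardware-specific descriptor programming would follow here */
}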
static int carl9170_tx_prepare(struct ar9170 *ar,
struct ieee80211_sta *sta,
struct sk_buff *skb)
@@ -874,13 +961,10 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
struct _carl9170_tx_superframe *txc;
struct carl9170_vif_info *cvif;
struct ieee80211_tx_info *info;
- struct ieee80211_tx_rate *txrate;
struct carl9170_tx_info *arinfo;
unsigned int hw_queue;
- int i;
__le16 mac_tmp;
u16 len;
- bool ampdu, no_ack;
BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
@@ -889,8 +973,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
AR9170_TX_HWDESC_LEN);
- BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
-
BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
((CARL9170_TX_SUPER_MISC_VIF_ID >>
CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
@@ -932,8 +1014,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
AR9170_TX_MAC_QOS);
- no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
- if (unlikely(no_ack))
+ if (unlikely(info->flags & IEEE80211_TX_CTL_NO_ACK))
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
if (info->control.hw_key) {
@@ -954,8 +1035,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
}
}
- ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
- if (ampdu) {
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
unsigned int density, factor;
if (unlikely(!sta || !cvif))
@@ -982,50 +1062,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
txc->s.ampdu_settings, factor);
}
- /*
- * NOTE: For the first rate, the ERP & AMPDU flags are directly
- * taken from mac_control. For all fallback rate, the firmware
- * updates the mac_control flags from the rate info field.
- */
- for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
- __le32 phy_set;
- txrate = &info->control.rates[i];
- if (txrate->idx < 0)
- break;
-
- phy_set = carl9170_tx_physet(ar, info, txrate);
- if (i == 0) {
- /* first rate - part of the hw's frame header */
- txc->f.phy_control = phy_set;
-
- if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
- mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
- if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
- mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
- else if (carl9170_tx_cts_check(ar, txrate))
- mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
-
- } else {
- /* fallback rates are stored in the firmware's
- * retry rate set array.
- */
- txc->s.rr[i - 1] = phy_set;
- }
-
- SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
- txrate->count);
-
- if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
- txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
- CARL9170_TX_SUPER_RI_ERP_PROT_S);
- else if (carl9170_tx_cts_check(ar, txrate))
- txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
- CARL9170_TX_SUPER_RI_ERP_PROT_S);
-
- if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
- txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
- }
-
txc->s.len = cpu_to_le16(skb->len);
txc->f.length = cpu_to_le16(len + FCS_LEN);
txc->f.mac_control = mac_tmp;
@@ -1086,31 +1122,12 @@ static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
}
}
-static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
- struct sk_buff *_src)
-{
- struct _carl9170_tx_superframe *dest, *src;
-
- dest = (void *) _dest->data;
- src = (void *) _src->data;
-
- /*
- * The mac80211 rate control algorithm expects that all MPDUs in
- * an AMPDU share the same tx vectors.
- * This is not really obvious right now, because the hardware
- * does the AMPDU setup according to its own rulebook.
- * Our nicely assembled, strictly monotonic increasing mpdu
- * chains will be broken up, mashed back together...
- */
-
- return (dest->f.phy_control == src->f.phy_control);
-}
-
static void carl9170_tx_ampdu(struct ar9170 *ar)
{
struct sk_buff_head agg;
struct carl9170_sta_tid *tid_info;
struct sk_buff *skb, *first;
+ struct ieee80211_tx_info *tx_info_first;
unsigned int i = 0, done_ampdus = 0;
u16 seq, queue, tmpssn;
@@ -1156,6 +1173,7 @@ retry:
goto processed;
}
+ tx_info_first = NULL;
while ((skb = skb_peek(&tid_info->queue))) {
/* strict 0, 1, ..., n - 1, n frame sequence order */
if (unlikely(carl9170_get_seq(skb) != seq))
@@ -1166,8 +1184,13 @@ retry:
(tid_info->max - 1)))
break;
- if (!carl9170_tx_rate_check(ar, skb, first))
- break;
+ if (!tx_info_first) {
+ carl9170_tx_get_rates(ar, tid_info->vif,
+ tid_info->sta, first);
+ tx_info_first = IEEE80211_SKB_CB(first);
+ }
+
+ carl9170_tx_apply_rateset(ar, tx_info_first, skb);
atomic_inc(&ar->tx_ampdu_upload);
tid_info->snx = seq = SEQ_NEXT(seq);
@@ -1182,8 +1205,7 @@ retry:
if (skb_queue_empty(&tid_info->queue) ||
carl9170_get_seq(skb_peek(&tid_info->queue)) !=
tid_info->snx) {
- /*
- * stop TID, if A-MPDU frames are still missing,
+ /* stop TID, if A-MPDU frames are still missing,
* or whenever the queue is empty.
*/
@@ -1450,12 +1472,14 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_vif *vif;
bool run;
if (unlikely(!IS_STARTED(ar)))
goto err_free;
info = IEEE80211_SKB_CB(skb);
+ vif = info->control.vif;
if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
goto err_free;
@@ -1486,6 +1510,8 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
} else {
unsigned int queue = skb_get_queue_mapping(skb);
+ carl9170_tx_get_rates(ar, vif, sta, skb);
+ carl9170_tx_apply_rateset(ar, info, skb);
skb_queue_tail(&ar->tx_pending[queue], skb);
}
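
The carl9170 change above splits rate handling in two: carl9170_tx_get_rates() asks mac80211's rate control for the frame's rateset, and carl9170_tx_apply_rateset() translates that rateset into the firmware superframe descriptor. For A-MPDUs, carl9170_tx_ampdu() fetches the rates only once, from the first queued subframe, and applies the same rateset to every following subframe, which is why the old carl9170_tx_rate_check() comparison could be dropped. A minimal sketch of the non-aggregated path, using only the two helpers visible in this diff (their internals are not shown here; the wrapper name is illustrative):

	/* sketch: per-frame rate handling after this patch (non-A-MPDU path) */
	static void example_tx_single_frame(struct ar9170 *ar,
					    struct ieee80211_vif *vif,
					    struct ieee80211_sta *sta,
					    struct sk_buff *skb)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		/* query mac80211's rate control for this frame */
		carl9170_tx_get_rates(ar, vif, sta, skb);

		/* write the rateset into the firmware superframe descriptor */
		carl9170_tx_apply_rateset(ar, info, skb);
	}
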
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index ccc4c718f124..7d077c752dd5 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -42,11 +42,11 @@ static int __ath_regd_init(struct ath_regulatory *reg);
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
/* We allow IBSS on these on a case by case basis by regulatory domain */
-#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 40, 0, 30,\
+#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
-#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 40, 0, 30,\
+#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
-#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 40, 0, 30,\
+#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index bac3d98a0cfb..ce8c0381825e 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -27,3 +27,15 @@ config WIL6210_ISR_COR
self-clear when accessed for debug purposes, it makes
such monitoring impossible.
Say y unless you debug interrupts
+
+config WIL6210_TRACING
+ bool "wil6210 tracing support"
+ depends on WIL6210
+ depends on EVENT_TRACING
+ default y
+ ---help---
+ Say Y here to enable tracepoints for the wil6210 driver
+ using the kernel tracing infrastructure. Select this
+ option if you are interested in debugging the driver.
+
+ If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index d288eea0a26a..f891d514d881 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -1,15 +1,20 @@
obj-$(CONFIG_WIL6210) += wil6210.o
-wil6210-objs := main.o
-wil6210-objs += netdev.o
-wil6210-objs += cfg80211.o
-wil6210-objs += pcie_bus.o
-wil6210-objs += debugfs.o
-wil6210-objs += wmi.o
-wil6210-objs += interrupt.o
-wil6210-objs += txrx.o
+wil6210-y := main.o
+wil6210-y += netdev.o
+wil6210-y += cfg80211.o
+wil6210-y += pcie_bus.o
+wil6210-y += debugfs.o
+wil6210-y += wmi.o
+wil6210-y += interrupt.o
+wil6210-y += txrx.o
+wil6210-y += debug.o
+wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
subdir-ccflags-y += -Werror
endif
+# so the tracing framework can find trace.h
+CFLAGS_trace.o := -I$(src)
+
subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index c5d4a87abaaf..61c302a6bdea 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -322,12 +322,16 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
* FW don't support scan after connection attempt
*/
set_bit(wil_status_dontscan, &wil->status);
+ set_bit(wil_status_fwconnecting, &wil->status);
rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
if (rc == 0) {
/* Connect can take lots of time */
mod_timer(&wil->connect_timer,
jiffies + msecs_to_jiffies(2000));
+ } else {
+ clear_bit(wil_status_dontscan, &wil->status);
+ clear_bit(wil_status_fwconnecting, &wil->status);
}
out:
@@ -398,6 +402,30 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
return 0;
}
+static int wil_fix_bcon(struct wil6210_priv *wil,
+ struct cfg80211_beacon_data *bcon)
+{
+ struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+ size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ int rc = 0;
+
+ if (bcon->probe_resp_len <= hlen)
+ return 0;
+
+ if (!bcon->proberesp_ies) {
+ bcon->proberesp_ies = f->u.probe_resp.variable;
+ bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
+ rc = 1;
+ }
+ if (!bcon->assocresp_ies) {
+ bcon->assocresp_ies = f->u.probe_resp.variable;
+ bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
+ rc = 1;
+ }
+
+ return rc;
+}
+
static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_ap_settings *info)
@@ -419,10 +447,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
info->ssid, info->ssid_len);
+ if (wil_fix_bcon(wil, bcon))
+ wil_dbg_misc(wil, "Fixed bcon\n");
+
rc = wil_reset(wil);
if (rc)
return rc;
+ /* Rx VRING. */
+ rc = wil_rx_init(wil);
+ if (rc)
+ return rc;
+
rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
if (rc)
return rc;
@@ -451,8 +487,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
if (rc)
return rc;
- /* Rx VRING. After MAC and beacon */
- rc = wil_rx_init(wil);
netif_carrier_on(ndev);
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
new file mode 100644
index 000000000000..9eeabf4a5879
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include "trace.h"
+
+int wil_err(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ret = netdev_err(ndev, "%pV", &vaf);
+ trace_wil6210_log_err(&vaf);
+ va_end(args);
+
+ return ret;
+}
+
+int wil_info(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ret = netdev_info(ndev, "%pV", &vaf);
+ trace_wil6210_log_info(&vaf);
+ va_end(args);
+
+ return ret;
+}
+
+int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ trace_wil6210_log_dbg(&vaf);
+ va_end(args);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 727b1f53e6ad..ab636767fbde 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -145,7 +145,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
le16_to_cpu(hdr.type), hdr.flags);
if (len <= MAX_MBOXITEM_SIZE) {
int n = 0;
- unsigned char printbuf[16 * 3 + 2];
+ char printbuf[16 * 3 + 2];
unsigned char databuf[MAX_MBOXITEM_SIZE];
void __iomem *src = wmi_buffer(wil, d.addr) +
sizeof(struct wil6210_mbox_hdr);
@@ -416,11 +416,17 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, " SKB = %p\n", skb);
if (skb) {
- unsigned char printbuf[16 * 3 + 2];
+ char printbuf[16 * 3 + 2];
int i = 0;
- int len = skb_headlen(skb);
+ int len = le16_to_cpu(d->dma.length);
void *p = skb->data;
+ if (len != skb_headlen(skb)) {
+ seq_printf(s, "!!! len: desc = %d skb = %d\n",
+ len, skb_headlen(skb));
+ len = min_t(int, len, skb_headlen(skb));
+ }
+
seq_printf(s, " len = %d\n", len);
while (i < len) {
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index e3c1e7684f9c..8205d3e4ab66 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include "wil6210.h"
+#include "trace.h"
/**
* Theory of operation:
@@ -103,14 +104,14 @@ static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
clear_bit(wil_status_irqen, &wil->status);
}
-static void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
{
iowrite32(WIL6210_IMC_TX, wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, IMC));
}
-static void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
iowrite32(WIL6210_IMC_RX, wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
@@ -168,6 +169,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
+ trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
if (!isr) {
@@ -180,13 +182,14 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
wil_dbg_irq(wil, "RX done\n");
isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
- wil_rx_handle(wil);
+ wil_dbg_txrx(wil, "NAPI schedule\n");
+ napi_schedule(&wil->napi_rx);
}
if (isr)
wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
- wil6210_unmask_irq_rx(wil);
+	/* Rx IRQ will be re-enabled when NAPI processing completes */
return IRQ_HANDLED;
}
@@ -198,6 +201,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
+ trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
if (!isr) {
@@ -208,23 +212,17 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
wil6210_mask_irq_tx(wil);
if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
- uint i;
wil_dbg_irq(wil, "TX done\n");
+ napi_schedule(&wil->napi_tx);
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
- for (i = 0; i < 24; i++) {
- u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
- if (isr & mask) {
- isr &= ~mask;
- wil_dbg_irq(wil, "TX done(%i)\n", i);
- wil_tx_complete(wil, i);
- }
- }
+		/* also clear all VRING interrupts */
+ isr &= ~(BIT(25) - 1UL);
}
if (isr)
wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
- wil6210_unmask_irq_tx(wil);
+	/* Tx IRQ will be re-enabled when NAPI processing completes */
return IRQ_HANDLED;
}
@@ -256,6 +254,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
+ trace_wil6210_irq_misc(isr);
wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
if (!isr) {
@@ -301,6 +300,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
struct wil6210_priv *wil = cookie;
u32 isr = wil->isr_misc;
+ trace_wil6210_irq_misc_thread(isr);
wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
if (isr & ISR_MISC_FW_ERROR) {
@@ -408,6 +408,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
if (wil6210_debug_irq_mask(wil, pseudo_cause))
return IRQ_NONE;
+ trace_wil6210_irq_pseudo(pseudo_cause);
wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
wil6210_mask_irq_pseudo(wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index a0478e2f6868..0a2844c48a60 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -56,27 +56,21 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
{
uint i;
struct net_device *ndev = wil_to_ndev(wil);
- struct wireless_dev *wdev = wil->wdev;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_link_off(wil);
- clear_bit(wil_status_fwconnected, &wil->status);
-
- switch (wdev->sme_state) {
- case CFG80211_SME_CONNECTED:
- cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE,
+ if (test_bit(wil_status_fwconnected, &wil->status)) {
+ clear_bit(wil_status_fwconnected, &wil->status);
+ cfg80211_disconnected(ndev,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
NULL, 0, GFP_KERNEL);
- break;
- case CFG80211_SME_CONNECTING:
+ } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
- break;
- default:
- break;
}
-
+ clear_bit(wil_status_fwconnecting, &wil->status);
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
wil_vring_fini_tx(wil, i);
@@ -292,41 +286,36 @@ static int __wil_up(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
- struct ieee80211_channel *channel = wdev->preset_chandef.chan;
int rc;
- int bi;
- u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
rc = wil_reset(wil);
if (rc)
return rc;
- /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */
- wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
+	/* Rx VRING */
+ rc = wil_rx_init(wil);
+ if (rc)
+ return rc;
+
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
wil_dbg_misc(wil, "type: STATION\n");
- bi = 0;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_AP:
wil_dbg_misc(wil, "type: AP\n");
- bi = 100;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_P2P_CLIENT:
wil_dbg_misc(wil, "type: P2P_CLIENT\n");
- bi = 0;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_P2P_GO:
wil_dbg_misc(wil, "type: P2P_GO\n");
- bi = 100;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_MONITOR:
wil_dbg_misc(wil, "type: Monitor\n");
- bi = 0;
ndev->type = ARPHRD_IEEE80211_RADIOTAP;
/* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
break;
@@ -334,36 +323,12 @@ static int __wil_up(struct wil6210_priv *wil)
return -EOPNOTSUPP;
}
- /* Apply profile in the following order: */
- /* SSID and channel for the AP */
- switch (wdev->iftype) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_P2P_GO:
- if (wdev->ssid_len == 0) {
- wil_err(wil, "SSID not set\n");
- return -EINVAL;
- }
- rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
- if (rc)
- return rc;
- break;
- default:
- break;
- }
-
/* MAC address - pre-requisite for other commands */
wmi_set_mac_address(wil, ndev->dev_addr);
- /* Set up beaconing if required. */
- if (bi > 0) {
- rc = wmi_pcp_start(wil, bi, wmi_nettype,
- (channel ? channel->hw_value : 0));
- if (rc)
- return rc;
- }
- /* Rx VRING. After MAC and beacon */
- wil_rx_init(wil);
+ napi_enable(&wil->napi_rx);
+ napi_enable(&wil->napi_tx);
return 0;
}
@@ -381,6 +346,9 @@ int wil_up(struct wil6210_priv *wil)
static int __wil_down(struct wil6210_priv *wil)
{
+ napi_disable(&wil->napi_rx);
+ napi_disable(&wil->napi_tx);
+
if (wil->scan_request) {
cfg80211_scan_done(wil->scan_request, true);
wil->scan_request = NULL;
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 098a8ec6b841..29dd1e58cb17 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -40,6 +40,55 @@ static const struct net_device_ops wil_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_rx);
+ int quota = budget;
+ int done;
+
+ wil_rx_handle(wil, &quota);
+ done = budget - quota;
+
+	if (done <= 1) { /* burst ends - at most one packet processed */
+ napi_complete(napi);
+ wil6210_unmask_irq_rx(wil);
+ wil_dbg_txrx(wil, "NAPI RX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
+
+ return done;
+}
+
+static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_tx);
+ int tx_done = 0;
+ uint i;
+
+	/* always process ALL Tx completions, regardless of budget - it is fast */
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ struct vring *vring = &wil->vring_tx[i];
+
+ if (!vring->va)
+ continue;
+
+ tx_done += wil_tx_complete(wil, i);
+ }
+
+	if (tx_done <= 1) { /* burst ends - at most one packet processed */
+ napi_complete(napi);
+ wil6210_unmask_irq_tx(wil);
+ wil_dbg_txrx(wil, "NAPI TX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
+
+ return min(tx_done, budget);
+}
+
void *wil_if_alloc(struct device *dev, void __iomem *csr)
{
struct net_device *ndev;
@@ -81,6 +130,11 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
+ netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
+ WIL6210_NAPI_BUDGET);
+ netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
+ WIL6210_NAPI_BUDGET);
+
wil_link_off(wil);
return wil;
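
The Rx and Tx interrupt handlers earlier in this patch no longer walk the rings themselves; they latch and trace the cause, mask the line, schedule NAPI, and leave the interrupt disabled until the poll routine decides the burst is over. The wil6210 poll routines above use a "done <= 1" heuristic for that decision; below is a minimal sketch of the more conventional budget-based form of the same IRQ-to-NAPI handshake (all example_* names are hypothetical placeholders):

	/* sketch of the generic IRQ -> NAPI handshake this patch adopts */
	static irqreturn_t example_irq(int irq, void *cookie)
	{
		struct example_dev *dev = cookie;

		example_mask_irq(dev);		/* keep the line quiet ... */
		napi_schedule(&dev->napi);	/* ... and defer the work to softirq */

		return IRQ_HANDLED;
	}

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_dev *dev = container_of(napi, struct example_dev,
						       napi);
		int done = example_process_completions(dev, budget);

		if (done < budget) {		/* burst is over: stop polling */
			napi_complete(napi);
			example_unmask_irq(dev);  /* interrupts drive us again */
		}

		return done;
	}
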
diff --git a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c
new file mode 100644
index 000000000000..cd2534b9c5aa
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/trace.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
new file mode 100644
index 000000000000..eff1239be53a
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wil6210
+#if !defined(WIL6210_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define WIL6210_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "wil6210.h"
+#include "txrx.h"
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_WIL6210_TRACING) || defined(__CHECKER__)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
+
+DECLARE_EVENT_CLASS(wil6210_wmi,
+ TP_PROTO(u16 id, void *buf, u16 buf_len),
+
+ TP_ARGS(id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(u16, id)
+ __field(u16, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "id 0x%04x len %d",
+ __entry->id, __entry->buf_len
+ )
+);
+
+DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
+ TP_PROTO(u16 id, void *buf, u16 buf_len),
+ TP_ARGS(id, buf, buf_len)
+);
+
+DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
+ TP_PROTO(u16 id, void *buf, u16 buf_len),
+ TP_ARGS(id, buf, buf_len)
+);
+
+#define WIL6210_MSG_MAX (200)
+
+DECLARE_EVENT_CLASS(wil6210_log_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, WIL6210_MSG_MAX)
+ ),
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ WIL6210_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= WIL6210_MSG_MAX);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_dbg,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+#define wil_pseudo_irq_cause(x) __print_flags(x, "|", \
+ {BIT_DMA_PSEUDO_CAUSE_RX, "Rx" }, \
+ {BIT_DMA_PSEUDO_CAUSE_TX, "Tx" }, \
+ {BIT_DMA_PSEUDO_CAUSE_MISC, "Misc" })
+
+TRACE_EVENT(wil6210_irq_pseudo,
+ TP_PROTO(u32 x),
+ TP_ARGS(x),
+ TP_STRUCT__entry(
+ __field(u32, x)
+ ),
+ TP_fast_assign(
+ __entry->x = x;
+ ),
+ TP_printk("cause 0x%08x : %s", __entry->x,
+ wil_pseudo_irq_cause(__entry->x))
+);
+
+DECLARE_EVENT_CLASS(wil6210_irq,
+ TP_PROTO(u32 x),
+ TP_ARGS(x),
+ TP_STRUCT__entry(
+ __field(u32, x)
+ ),
+ TP_fast_assign(
+ __entry->x = x;
+ ),
+ TP_printk("cause 0x%08x", __entry->x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_rx,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_tx,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_misc,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_misc_thread,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+TRACE_EVENT(wil6210_rx,
+ TP_PROTO(u16 index, struct vring_rx_desc *d),
+ TP_ARGS(index, d),
+ TP_STRUCT__entry(
+ __field(u16, index)
+ __field(unsigned int, len)
+ __field(u8, mid)
+ __field(u8, cid)
+ __field(u8, tid)
+ __field(u8, type)
+ __field(u8, subtype)
+ __field(u16, seq)
+ __field(u8, mcs)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->len = d->dma.length;
+ __entry->mid = wil_rxdesc_mid(d);
+ __entry->cid = wil_rxdesc_cid(d);
+ __entry->tid = wil_rxdesc_tid(d);
+ __entry->type = wil_rxdesc_ftype(d);
+ __entry->subtype = wil_rxdesc_subtype(d);
+ __entry->seq = wil_rxdesc_seq(d);
+ __entry->mcs = wil_rxdesc_mcs(d);
+ ),
+ TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x"
+ " type 0x%1x subtype 0x%1x", __entry->index, __entry->len,
+ __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
+ __entry->seq, __entry->type, __entry->subtype)
+);
+
+TRACE_EVENT(wil6210_tx,
+ TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
+ TP_ARGS(vring, index, len, frags),
+ TP_STRUCT__entry(
+ __field(u8, vring)
+ __field(u8, frags)
+ __field(u16, index)
+ __field(unsigned int, len)
+ ),
+ TP_fast_assign(
+ __entry->vring = vring;
+ __entry->frags = frags;
+ __entry->index = index;
+ __entry->len = len;
+ ),
+ TP_printk("vring %d index %d len %d frags %d",
+ __entry->vring, __entry->index, __entry->len, __entry->frags)
+);
+
+TRACE_EVENT(wil6210_tx_done,
+ TP_PROTO(u8 vring, u16 index, unsigned int len, u8 err),
+ TP_ARGS(vring, index, len, err),
+ TP_STRUCT__entry(
+ __field(u8, vring)
+ __field(u8, err)
+ __field(u16, index)
+ __field(unsigned int, len)
+ ),
+ TP_fast_assign(
+ __entry->vring = vring;
+ __entry->index = index;
+ __entry->len = len;
+ __entry->err = err;
+ ),
+ TP_printk("vring %d index %d len %d err 0x%02x",
+ __entry->vring, __entry->index, __entry->len,
+ __entry->err)
+);
+
+#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ */
+
+#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+#endif /* defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) */
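
Two details of the trace.h header above are worth spelling out. First, when CONFIG_WIL6210_TRACING is disabled, trace.c is not built, yet calls such as trace_wil6210_irq_rx(isr) remain in the driver; the local overrides of TRACE_EVENT/DEFINE_EVENT turn every event into an empty static inline, so those calls compile away. Second, TRACE_INCLUDE_PATH is set to "." (paired with the -I$(src) added to the Makefile) so that define_trace.h can find this header outside include/trace/events. As a sketch, with tracing disabled the override expands one event into nothing more than a stub:

	/* e.g. DEFINE_EVENT(wil6210_irq, wil6210_irq_rx, TP_PROTO(u32 x), TP_ARGS(x))
	 * reduces to an empty inline, so call sites compile to nothing:
	 */
	static inline void trace_wil6210_irq_rx(u32 x) {}
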
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 797024507c71..d240b24e1ccf 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -22,6 +22,7 @@
#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
+#include "trace.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
@@ -89,8 +90,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
* we can use any
*/
for (i = 0; i < vring->size; i++) {
- volatile struct vring_tx_desc *d = &(vring->va[i].tx);
- d->dma.status = TX_DMA_STATUS_DU;
+ volatile struct vring_tx_desc *_d = &(vring->va[i].tx);
+ _d->dma.status = TX_DMA_STATUS_DU;
}
wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
@@ -106,30 +107,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
size_t sz = vring->size * sizeof(vring->va[0]);
while (!wil_vring_is_empty(vring)) {
+ dma_addr_t pa;
+ struct sk_buff *skb;
+ u16 dmalen;
+
if (tx) {
- volatile struct vring_tx_desc *d =
+ struct vring_tx_desc dd, *d = &dd;
+ volatile struct vring_tx_desc *_d =
&vring->va[vring->swtail].tx;
- dma_addr_t pa = d->dma.addr_low |
- ((u64)d->dma.addr_high << 32);
- struct sk_buff *skb = vring->ctx[vring->swtail];
+
+ *d = *_d;
+ pa = wil_desc_addr(&d->dma.addr);
+ dmalen = le16_to_cpu(d->dma.length);
+ skb = vring->ctx[vring->swtail];
if (skb) {
- dma_unmap_single(dev, pa, d->dma.length,
+ dma_unmap_single(dev, pa, dmalen,
DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
vring->ctx[vring->swtail] = NULL;
} else {
- dma_unmap_page(dev, pa, d->dma.length,
+ dma_unmap_page(dev, pa, dmalen,
DMA_TO_DEVICE);
}
vring->swtail = wil_vring_next_tail(vring);
} else { /* rx */
- volatile struct vring_rx_desc *d =
+ struct vring_rx_desc dd, *d = &dd;
+ volatile struct vring_rx_desc *_d =
&vring->va[vring->swtail].rx;
- dma_addr_t pa = d->dma.addr_low |
- ((u64)d->dma.addr_high << 32);
- struct sk_buff *skb = vring->ctx[vring->swhead];
- dma_unmap_single(dev, pa, d->dma.length,
- DMA_FROM_DEVICE);
+
+ *d = *_d;
+ pa = wil_desc_addr(&d->dma.addr);
+ dmalen = le16_to_cpu(d->dma.length);
+ skb = vring->ctx[vring->swhead];
+ dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
kfree_skb(skb);
wil_vring_advance_head(vring, 1);
}
@@ -151,7 +161,8 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
{
struct device *dev = wil_to_dev(wil);
unsigned int sz = RX_BUF_LEN;
- volatile struct vring_rx_desc *d = &(vring->va[i].rx);
+ struct vring_rx_desc dd, *d = &dd;
+ volatile struct vring_rx_desc *_d = &(vring->va[i].rx);
dma_addr_t pa;
/* TODO align */
@@ -169,13 +180,13 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
}
d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
- d->dma.addr_low = lower_32_bits(pa);
- d->dma.addr_high = (u16)upper_32_bits(pa);
+ wil_desc_addr_set(&d->dma.addr, pa);
/* ip_length don't care */
/* b11 don't care */
/* error don't care */
d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
- d->dma.length = sz;
+ d->dma.length = cpu_to_le16(sz);
+ *_d = *d;
vring->ctx[i] = skb;
return 0;
@@ -321,11 +332,12 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
{
struct device *dev = wil_to_dev(wil);
struct net_device *ndev = wil_to_ndev(wil);
- volatile struct vring_rx_desc *d;
- struct vring_rx_desc *d1;
+ volatile struct vring_rx_desc *_d;
+ struct vring_rx_desc *d;
struct sk_buff *skb;
dma_addr_t pa;
unsigned int sz = RX_BUF_LEN;
+ u16 dmalen;
u8 ftype;
u8 ds_bits;
@@ -334,32 +346,44 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
if (wil_vring_is_empty(vring))
return NULL;
- d = &(vring->va[vring->swhead].rx);
- if (!(d->dma.status & RX_DMA_STATUS_DU)) {
+ _d = &(vring->va[vring->swhead].rx);
+ if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
/* it is not error, we just reached end of Rx done area */
return NULL;
}
- pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
skb = vring->ctx[vring->swhead];
+ d = wil_skb_rxdesc(skb);
+ *d = *_d;
+ pa = wil_desc_addr(&d->dma.addr);
+ vring->ctx[vring->swhead] = NULL;
+ wil_vring_advance_head(vring, 1);
+
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
- skb_trim(skb, d->dma.length);
+ dmalen = le16_to_cpu(d->dma.length);
+
+ trace_wil6210_rx(vring->swhead, d);
+ wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ if (dmalen > sz) {
+ wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ kfree_skb(skb);
+ return NULL;
+ }
+ skb_trim(skb, dmalen);
+
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
- d1 = wil_skb_rxdesc(skb);
- *d1 = *d;
- wil->stats.last_mcs_rx = wil_rxdesc_mcs(d1);
+ wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
/* use radiotap header only if required */
if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
wil_rx_add_radiotap_header(wil, skb);
- wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
- wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
- (const void *)d, sizeof(*d), false);
-
- wil_vring_advance_head(vring, 1);
-
/* no extra checks if in sniffer mode */
if (ndev->type != ARPHRD_ETHER)
return skb;
@@ -368,7 +392,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
* Driver should recognize it by frame type, that is found
* in Rx descriptor. If type is not data, it is 802.11 frame as is
*/
- ftype = wil_rxdesc_ftype(d1) << 2;
+ ftype = wil_rxdesc_ftype(d) << 2;
if (ftype != IEEE80211_FTYPE_DATA) {
wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
/* TODO: process it */
@@ -383,7 +407,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
return NULL;
}
- ds_bits = wil_rxdesc_ds_bits(d1);
+ ds_bits = wil_rxdesc_ds_bits(d);
if (ds_bits == 1) {
/*
* HW bug - in ToDS mode, i.e. Rx on AP side,
@@ -425,6 +449,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
/*
* Pass Rx packet to the netif. Update statistics.
+ * Called in softirq context (NAPI poll).
*/
static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
@@ -433,10 +458,7 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
skb_orphan(skb);
- if (in_interrupt())
- rc = netif_rx(skb);
- else
- rc = netif_rx_ni(skb);
+ rc = netif_receive_skb(skb);
if (likely(rc == NET_RX_SUCCESS)) {
ndev->stats.rx_packets++;
@@ -450,9 +472,9 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
/**
* Proceed all completed skb's from Rx VRING
*
- * Safe to call from IRQ
+ * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
*/
-void wil_rx_handle(struct wil6210_priv *wil)
+void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
struct net_device *ndev = wil_to_ndev(wil);
struct vring *v = &wil->vring_rx;
@@ -463,9 +485,8 @@ void wil_rx_handle(struct wil6210_priv *wil)
return;
}
wil_dbg_txrx(wil, "%s()\n", __func__);
- while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
- wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
- skb->data, skb_headlen(skb), false);
+ while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
+ (*quota)--;
if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
skb->dev = ndev;
@@ -600,18 +621,17 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
return NULL;
}
-static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
- dma_addr_t pa, u32 len)
+static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
+ int vring_index)
{
- d->dma.addr_low = lower_32_bits(pa);
- d->dma.addr_high = (u16)upper_32_bits(pa);
+ wil_desc_addr_set(&d->dma.addr, pa);
d->dma.ip_length = 0;
/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
d->dma.b11 = 0/*14 | BIT(7)*/;
d->dma.error = 0;
d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
- d->dma.length = len;
- d->dma.d0 = 0;
+ d->dma.length = cpu_to_le16((u16)len);
+ d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
d->mac.d[0] = 0;
d->mac.d[1] = 0;
d->mac.d[2] = 0;
@@ -630,7 +650,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
- volatile struct vring_tx_desc *d;
+ struct vring_tx_desc dd, *d = &dd;
+ volatile struct vring_tx_desc *_d;
u32 swhead = vring->swhead;
int avail = wil_vring_avail_tx(vring);
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -648,7 +669,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
1 + nr_frags);
return -ENOMEM;
}
- d = &(vring->va[i].tx);
+ _d = &(vring->va[i].tx);
/* FIXME FW can accept only unicast frames for the peer */
memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
@@ -664,28 +685,32 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
if (unlikely(dma_mapping_error(dev, pa)))
return -EINVAL;
/* 1-st segment */
- wil_tx_desc_map(d, pa, skb_headlen(skb));
+ wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
d->mac.d[2] |= ((nr_frags + 1) <<
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+ if (nr_frags)
+ *_d = *d;
+
/* middle segments */
for (f = 0; f < nr_frags; f++) {
const struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[f];
int len = skb_frag_size(frag);
i = (swhead + f + 1) % vring->size;
- d = &(vring->va[i].tx);
+ _d = &(vring->va[i].tx);
pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
- wil_tx_desc_map(d, pa, len);
+ wil_tx_desc_map(d, pa, len, vring_index);
vring->ctx[i] = NULL;
+ *_d = *d;
}
/* for the last seg only */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
- d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
- d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+ *_d = *d;
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
@@ -693,6 +718,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
/* advance swhead */
wil_vring_advance_head(vring, nr_frags + 1);
wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+ trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
/* hold reference to skb
* to prevent skb release before accounting
@@ -705,14 +731,18 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
/* unmap what we have mapped */
/* Note: increment @f to operate with positive index */
for (f++; f > 0; f--) {
+ u16 dmalen;
+
i = (swhead + f) % vring->size;
- d = &(vring->va[i].tx);
- d->dma.status = TX_DMA_STATUS_DU;
- pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ _d = &(vring->va[i].tx);
+ *d = *_d;
+ _d->dma.status = TX_DMA_STATUS_DU;
+ pa = wil_desc_addr(&d->dma.addr);
+ dmalen = le16_to_cpu(d->dma.length);
if (vring->ctx[i])
- dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
else
- dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
}
return -EINVAL;
@@ -738,18 +768,16 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wil_err(wil, "Xmit in monitor mode not supported\n");
goto drop;
}
- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
- rc = wmi_tx_eapol(wil, skb);
- } else {
- /* find vring */
- vring = wil_find_tx_vring(wil, skb);
- if (!vring) {
- wil_err(wil, "No Tx VRING available\n");
- goto drop;
- }
- /* set up vring entry */
- rc = wil_tx_vring(wil, vring, skb);
+
+ /* find vring */
+ vring = wil_find_tx_vring(wil, skb);
+ if (!vring) {
+ wil_err(wil, "No Tx VRING available\n");
+ goto drop;
}
+ /* set up vring entry */
+ rc = wil_tx_vring(wil, vring, skb);
+
switch (rc) {
case 0:
/* statistics will be updated on the tx_complete */
@@ -761,7 +789,6 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
break; /* goto drop; */
}
drop:
- netif_tx_stop_all_queues(ndev);
ndev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
@@ -771,41 +798,48 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/**
* Clean up transmitted skb's from the Tx VRING
*
+ * Return number of descriptors cleared
+ *
* Safe to call from IRQ
*/
-void wil_tx_complete(struct wil6210_priv *wil, int ringid)
+int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
struct net_device *ndev = wil_to_ndev(wil);
struct device *dev = wil_to_dev(wil);
struct vring *vring = &wil->vring_tx[ringid];
+ int done = 0;
if (!vring->va) {
wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
- return;
+ return 0;
}
wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
while (!wil_vring_is_empty(vring)) {
- volatile struct vring_tx_desc *d1 =
+ volatile struct vring_tx_desc *_d =
&vring->va[vring->swtail].tx;
struct vring_tx_desc dd, *d = &dd;
dma_addr_t pa;
struct sk_buff *skb;
+ u16 dmalen;
- dd = *d1;
+ *d = *_d;
if (!(d->dma.status & TX_DMA_STATUS_DU))
break;
+ dmalen = le16_to_cpu(d->dma.length);
+ trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
+ d->dma.error);
wil_dbg_txrx(wil,
"Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
- vring->swtail, d->dma.length, d->dma.status,
+ vring->swtail, dmalen, d->dma.status,
d->dma.error);
wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ pa = wil_desc_addr(&d->dma.addr);
skb = vring->ctx[vring->swtail];
if (skb) {
if (d->dma.error == 0) {
@@ -815,18 +849,21 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid)
ndev->stats.tx_errors++;
}
- dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
vring->ctx[vring->swtail] = NULL;
} else {
- dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
}
- d->dma.addr_low = 0;
- d->dma.addr_high = 0;
+ d->dma.addr.addr_low = 0;
+ d->dma.addr.addr_high = 0;
d->dma.length = 0;
d->dma.status = TX_DMA_STATUS_DU;
vring->swtail = wil_vring_next_tail(vring);
+ done++;
}
if (wil_vring_avail_tx(vring) > vring->size/4)
netif_tx_wake_all_queues(wil_to_ndev(wil));
+
+ return done;
}
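
A pattern repeated throughout the txrx.c hunks above: instead of poking individual fields through a volatile descriptor pointer, the code now builds or reads a descriptor in a local, non-volatile struct and moves it to or from the DMA ring in a single struct assignment (*_d = *d on the way in, *d = *_d on the way out). Combined with the switch to __le16 length and the vring_dma_addr helpers, this keeps hardware-visible accesses in one place and the byte order correct on big-endian hosts. A reduced sketch of the write side (the helper name is hypothetical; variable names mirror the diff and the surrounding setup is omitted):

	/* sketch: fill a Tx descriptor locally, then publish it to the ring */
	static void example_publish_tx_desc(struct vring *vring, unsigned int i,
					    dma_addr_t pa, u32 len, int vring_index)
	{
		struct vring_tx_desc dd = {}, *d = &dd;	/* plain memory, not volatile */
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		wil_desc_addr_set(&d->dma.addr, pa);	/* 32-bit low + 16-bit high */
		d->dma.length = cpu_to_le16((u16)len);	/* ring fields are little endian */
		d->dma.d0 = vring_index << DMA_CFG_DESC_TX_0_QID_POS;

		*_d = *d;				/* one copy into the DMA ring */
	}
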
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index adef12fb2aee..859aea68a1fa 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -27,6 +27,28 @@
#define WIL6210_RTAP_SIZE (128)
/* Tx/Rx path */
+
+/*
+ * Common representation of physical address in Vring
+ */
+struct vring_dma_addr {
+ __le32 addr_low;
+ __le16 addr_high;
+} __packed;
+
+static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
+{
+ return le32_to_cpu(addr->addr_low) |
+ ((u64)le16_to_cpu(addr->addr_high) << 32);
+}
+
+static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
+ dma_addr_t pa)
+{
+ addr->addr_low = cpu_to_le32(lower_32_bits(pa));
+ addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
+}
+
/*
* Tx descriptor - MAC part
* [dword 0]
@@ -179,6 +201,10 @@ struct vring_tx_mac {
#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS 9
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_MSK 0x200
+
#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
@@ -216,13 +242,12 @@ struct vring_tx_mac {
struct vring_tx_dma {
u32 d0;
- u32 addr_low;
- u16 addr_high;
+ struct vring_dma_addr addr;
u8 ip_length;
u8 b11; /* 0..6: mac_length; 7:ip_version */
u8 error; /* 0..2: err; 3..7: reserved; */
u8 status; /* 0: used; 1..7; reserved */
- u16 length;
+ __le16 length;
} __packed;
/*
@@ -315,13 +340,12 @@ struct vring_rx_mac {
struct vring_rx_dma {
u32 d0;
- u32 addr_low;
- u16 addr_high;
+ struct vring_dma_addr addr;
u8 ip_length;
u8 b11;
u8 error;
u8 status;
- u16 length;
+ __le16 length;
} __packed;
struct vring_tx_desc {
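
For reference, the vring_dma_addr layout introduced above gives the ring a 48-bit, little-endian address format: a 32-bit low word plus a 16-bit high word. A small worked sketch of the round trip through the two helpers, assuming a 64-bit dma_addr_t and an arbitrary example address:

	/* sketch: round-tripping a DMA address through the vring_dma_addr helpers */
	static void example_addr_roundtrip(void)
	{
		struct vring_dma_addr a;
		dma_addr_t pa = 0x123456789abcULL;	/* arbitrary 48-bit example */

		wil_desc_addr_set(&a, pa);
		/* a.addr_low  == cpu_to_le32(0x56789abc) */
		/* a.addr_high == cpu_to_le16(0x1234)     */
		WARN_ON(wil_desc_addr(&a) != pa);	/* exact below 2^48 */
	}
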
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 8f76ecd8a7e5..44fdab51de7e 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -34,9 +34,11 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MEM_SIZE (2*1024*1024UL)
-#define WIL6210_RX_RING_SIZE (128)
-#define WIL6210_TX_RING_SIZE (128)
-#define WIL6210_MAX_TX_RINGS (24)
+#define WIL6210_RX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
+#define WIL6210_MAX_CID (8) /* HW limit */
+#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
/* Hardware definitions begin */
@@ -184,6 +186,7 @@ struct vring {
enum { /* for wil6210_priv.status */
wil_status_fwready = 0,
+ wil_status_fwconnecting,
wil_status_fwconnected,
wil_status_dontscan,
wil_status_reset_done,
@@ -239,6 +242,8 @@ struct wil6210_priv {
* - consumed in thread by wmi_event_worker
*/
spinlock_t wmi_ev_lock;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
/* DMA related */
struct vring vring_rx;
struct vring vring_tx[WIL6210_MAX_TX_RINGS];
@@ -267,9 +272,13 @@ struct wil6210_priv {
#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
-#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg)
-#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
-#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
+int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
+int wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+int wil_info(struct wil6210_priv *wil, const char *fmt, ...);
+#define wil_dbg(wil, fmt, arg...) do { \
+ netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
+ wil_dbg_trace(wil, fmt, ##arg); \
+} while (0)
#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
@@ -320,7 +329,6 @@ int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
int wmi_set_channel(struct wil6210_priv *wil, int channel);
int wmi_get_channel(struct wil6210_priv *wil, int *channel);
-int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr);
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
@@ -356,10 +364,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
-void wil_tx_complete(struct wil6210_priv *wil, int ringid);
+int wil_tx_complete(struct wil6210_priv *wil, int ringid);
+void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
/* RX API */
-void wil_rx_handle(struct wil6210_priv *wil);
+void wil_rx_handle(struct wil6210_priv *wil, int *quota);
+void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 45b04e383f9a..dc8059ad4bab 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -20,6 +20,7 @@
#include "wil6210.h"
#include "txrx.h"
#include "wmi.h"
+#include "trace.h"
/**
* WMI event receiving - theory of operations
@@ -74,10 +75,11 @@ static const struct {
{0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
{0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
{0x880000, 0x88a000, 0x880000}, /* various RGF */
- {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
+ {0x8c0000, 0x949000, 0x8c0000}, /* trivial mapping for upper area */
/*
* 920000..930000 ucode code RAM
* 930000..932000 ucode data RAM
+ * 932000..949000 back-door debug data
*/
};
@@ -246,6 +248,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.head));
+ trace_wil6210_wmi_cmd(cmdid, buf, len);
+
/* interrupt to FW */
iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
@@ -311,8 +315,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
data->info.channel, data->info.mcs, data->info.snr);
- wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
- le16_to_cpu(data->info.stype));
+ wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
+ le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
@@ -406,7 +410,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
- if (wdev->sme_state != CFG80211_SME_CONNECTING) {
+ if (!test_bit(wil_status_fwconnecting, &wil->status)) {
wil_err(wil, "Not in connecting state\n");
return;
}
@@ -430,6 +434,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
}
+ clear_bit(wil_status_fwconnecting, &wil->status);
set_bit(wil_status_fwconnected, &wil->status);
/* FIXME FW can transmit only ucast frames to peer */
@@ -635,8 +640,9 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
hdr.flags);
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
- wil_dbg_wmi(wil, "WMI event 0x%04x\n",
- evt->event.wmi.id);
+ u16 id = le16_to_cpu(evt->event.wmi.id);
+ wil_dbg_wmi(wil, "WMI event 0x%04x\n", id);
+ trace_wil6210_wmi_event(id, &evt->event.wmi, len);
}
wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
&evt->event.hdr, sizeof(hdr) + len, true);
@@ -724,7 +730,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
.bcon_interval = cpu_to_le16(bi),
.network_type = wmi_nettype,
.disable_sec_offload = 1,
- .channel = chan,
+ .channel = chan - 1,
};
struct {
struct wil6210_mbox_hdr_wmi wmi;
@@ -734,8 +740,12 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
if (!wil->secure_pcp)
cmd.disable_sec = 1;
+ /*
+	 * Processing time may be long; in the secure-AP case the FW takes about
+	 * 3500 ms to start the AP.
+ */
rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
- WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100);
+ WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000);
if (rc)
return rc;
@@ -829,40 +839,6 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
}
-int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
-{
- struct wmi_eapol_tx_cmd *cmd;
- struct ethhdr *eth;
- u16 eapol_len = skb->len - ETH_HLEN;
- void *eapol = skb->data + ETH_HLEN;
- uint i;
- int rc;
-
- skb_set_mac_header(skb, 0);
- eth = eth_hdr(skb);
- wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
- for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
- if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
- goto found_dest;
- }
-
- return -EINVAL;
-
- found_dest:
- /* find out eapol data & len */
- cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
- if (!cmd)
- return -EINVAL;
-
- memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
- cmd->eapol_len = cpu_to_le16(eapol_len);
- memcpy(cmd->eapol, eapol, eapol_len);
- rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
- kfree(cmd);
-
- return rc;
-}
-
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr)
{
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 078e6f3477a9..51ff0b198d0a 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -28,18 +28,12 @@ config B43
config B43_BCMA
bool "Support for BCMA bus"
- depends on B43 && BCMA
- default y
-
-config B43_BCMA_EXTRA
- bool "Hardware support that overlaps with the brcmsmac driver"
- depends on B43_BCMA
- default n if BRCMSMAC
+ depends on B43 && (BCMA = y || BCMA = B43)
default y
config B43_SSB
bool
- depends on B43 && SSB
+ depends on B43 && (SSB = y || SSB = B43)
default y
# Auto-select SSB PCI-HOST support, if possible
@@ -111,6 +105,7 @@ config B43_PIO
config B43_PHY_N
bool "Support for 802.11n (N-PHY) devices"
depends on B43
+ default y
---help---
Support for the N-PHY.
@@ -132,6 +127,7 @@ config B43_PHY_LP
config B43_PHY_HT
bool "Support for HT-PHY (high throughput) devices"
depends on B43 && B43_BCMA
+ default y
---help---
Support for the HT-PHY.
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a95b77ab360e..0e933bb71543 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -113,13 +113,15 @@ static int b43_modparam_pio = 0;
module_param_named(pio, b43_modparam_pio, int, 0644);
MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
+static int modparam_allhwsupport = !IS_ENABLED(CONFIG_BRCMSMAC);
+module_param_named(allhwsupport, modparam_allhwsupport, int, 0444);
+MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the brcmsmac driver)");
+
#ifdef CONFIG_B43_BCMA
static const struct bcma_device_id b43_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
-#ifdef CONFIG_B43_BCMA_EXTRA
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
-#endif
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
BCMA_CORETABLE_END
};
@@ -5396,6 +5398,12 @@ static int b43_bcma_probe(struct bcma_device *core)
struct b43_wl *wl;
int err;
+ if (!modparam_allhwsupport &&
+ (core->id.rev == 0x17 || core->id.rev == 0x18)) {
+		pr_err("Support for core revisions 0x17 and 0x18 disabled by module param allhwsupport=0. Try b43.allhwsupport=1\n");
+ return -ENOTSUPP;
+ }
+
dev = b43_bus_dev_bcma_init(core);
if (!dev)
return -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 4891e3df2058..e3f3c48f86d4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -22,9 +22,11 @@
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
+#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <defs.h>
@@ -160,7 +162,7 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
return 0;
}
-int
+static int
brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
int err = 0, i;
@@ -191,12 +193,33 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
return err;
}
+static int
+brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
+{
+ uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ if (bar0 != sdiodev->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ if (err)
+ return err;
+
+ sdiodev->sbwad = bar0;
+ }
+
+ *addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ if (width == 4)
+ *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ return 0;
+}
+
int
brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
void *data, bool write)
{
u8 func_num, reg_size;
- u32 bar;
s32 retry = 0;
int ret;
@@ -216,18 +239,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
func_num = SDIO_FUNC_1;
reg_size = 4;
- /* Set the window for SB core register */
- bar = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- if (bar != sdiodev->sbwad) {
- ret = brcmf_sdcard_set_sbaddr_window(sdiodev, bar);
- if (ret != 0) {
- memset(data, 0xFF, reg_size);
- return ret;
- }
- sdiodev->sbwad = bar;
- }
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
}
do {
@@ -303,30 +315,207 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
*ret = retval;
}
-static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
- uint flags, uint width, u32 *addr)
+/**
+ * brcmf_sdio_buffrw - SDIO interface function for block data access
+ * @sdiodev: brcmfmac sdio device
+ * @fn: SDIO function number
+ * @write: direction flag
+ * @addr: dongle memory address as source/destination
+ * @pkt: skb pointer
+ *
+ * This function serves as the interface to the MMC stack for block data
+ * access. It assumes that the skb passed down by the caller has already
+ * been padded and aligned.
+ */
+static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr, struct sk_buff_head *pktlist)
{
- uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- int err = 0;
+ unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
+ unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
+ unsigned short max_seg_sz, seg_sz;
+ unsigned char *pkt_data, *orig_data, *dst_data;
+ struct sk_buff *pkt_next = NULL, *local_pkt_next;
+ struct sk_buff_head local_list, *target_list;
+ struct mmc_request mmc_req;
+ struct mmc_command mmc_cmd;
+ struct mmc_data mmc_dat;
+ struct sg_table st;
+ struct scatterlist *sgl;
+ struct mmc_host *host;
+ int ret = 0;
- /* Async not implemented yet */
- if (flags & SDIO_REQ_ASYNC)
- return -ENOTSUPP;
+ if (!pktlist->qlen)
+ return -EINVAL;
- if (bar0 != sdiodev->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
- if (err)
- return err;
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
- sdiodev->sbwad = bar0;
+ /* A single skb uses the standard mmc interface */
+ if (pktlist->qlen == 1) {
+ pkt_next = pktlist->next;
+ req_sz = pkt_next->len + 3;
+ req_sz &= (uint)~3;
+
+ if (write)
+ return sdio_memcpy_toio(sdiodev->func[fn], addr,
+ ((u8 *)(pkt_next->data)),
+ req_sz);
+ else if (fn == 1)
+ return sdio_memcpy_fromio(sdiodev->func[fn],
+ ((u8 *)(pkt_next->data)),
+ addr, req_sz);
+ else
+ /* function 2 read is FIFO operation */
+ return sdio_readsb(sdiodev->func[fn],
+ ((u8 *)(pkt_next->data)), addr,
+ req_sz);
}
- *addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ target_list = pktlist;
+ /* for host with broken sg support, prepare a page aligned list */
+ __skb_queue_head_init(&local_list);
+ if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
+ req_sz = 0;
+ skb_queue_walk(pktlist, pkt_next)
+ req_sz += pkt_next->len;
+ req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
+ while (req_sz > PAGE_SIZE) {
+ pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
+ if (pkt_next == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ __skb_queue_tail(&local_list, pkt_next);
+ req_sz -= PAGE_SIZE;
+ }
+ pkt_next = brcmu_pkt_buf_get_skb(req_sz);
+ if (pkt_next == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ __skb_queue_tail(&local_list, pkt_next);
+ target_list = &local_list;
+ }
- if (width == 4)
- *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ host = sdiodev->func[fn]->card->host;
+ func_blk_sz = sdiodev->func[fn]->cur_blksize;
+ /* Blocks per command is limited by host count, host transfer
+ * size and the maximum for IO_RW_EXTENDED of 511 blocks.
+ */
+ max_blks = min_t(unsigned int, host->max_blk_count, 511u);
+ max_req_sz = min_t(unsigned int, host->max_req_size,
+ max_blks * func_blk_sz);
+ max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
+ max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
+ seg_sz = target_list->qlen;
+ pkt_offset = 0;
+ pkt_next = target_list->next;
+
+ if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto exit;
+ }
- return 0;
+ while (seg_sz) {
+ req_sz = 0;
+ sg_cnt = 0;
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ memset(&mmc_dat, 0, sizeof(struct mmc_data));
+ sgl = st.sgl;
+ /* prep sg table */
+ while (pkt_next != (struct sk_buff *)target_list) {
+ pkt_data = pkt_next->data + pkt_offset;
+ sg_data_sz = pkt_next->len - pkt_offset;
+ if (sg_data_sz > host->max_seg_size)
+ sg_data_sz = host->max_seg_size;
+ if (sg_data_sz > max_req_sz - req_sz)
+ sg_data_sz = max_req_sz - req_sz;
+
+ sg_set_buf(sgl, pkt_data, sg_data_sz);
+
+ sg_cnt++;
+ sgl = sg_next(sgl);
+ req_sz += sg_data_sz;
+ pkt_offset += sg_data_sz;
+ if (pkt_offset == pkt_next->len) {
+ pkt_offset = 0;
+ pkt_next = pkt_next->next;
+ }
+
+ if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
+ break;
+ }
+ seg_sz -= sg_cnt;
+
+ if (req_sz % func_blk_sz != 0) {
+ brcmf_err("sg request length %u is not %u aligned\n",
+ req_sz, func_blk_sz);
+ ret = -ENOTBLK;
+ goto exit;
+ }
+ mmc_dat.sg = st.sgl;
+ mmc_dat.sg_len = sg_cnt;
+ mmc_dat.blksz = func_blk_sz;
+ mmc_dat.blocks = req_sz / func_blk_sz;
+ mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+ mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
+ mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
+ mmc_cmd.arg |= 1<<27; /* block mode */
+ /* incrementing addr for function 1 */
+ mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+ mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
+ mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
+ mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+ mmc_req.cmd = &mmc_cmd;
+ mmc_req.data = &mmc_dat;
+ if (fn == 1)
+ addr += req_sz;
+
+ mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
+ mmc_wait_for_req(host, &mmc_req);
+
+ ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
+ if (ret != 0) {
+ brcmf_err("CMD53 sg block %s failed %d\n",
+ write ? "write" : "read", ret);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
+ local_pkt_next = local_list.next;
+ orig_offset = 0;
+ skb_queue_walk(pktlist, pkt_next) {
+ dst_offset = 0;
+ do {
+ req_sz = local_pkt_next->len - orig_offset;
+ req_sz = min_t(uint, pkt_next->len - dst_offset,
+ req_sz);
+ orig_data = local_pkt_next->data + orig_offset;
+ dst_data = pkt_next->data + dst_offset;
+ memcpy(dst_data, orig_data, req_sz);
+ orig_offset += req_sz;
+ dst_offset += req_sz;
+ if (orig_offset == local_pkt_next->len) {
+ orig_offset = 0;
+ local_pkt_next = local_pkt_next->next;
+ }
+ if (dst_offset == pkt_next->len)
+ break;
+ } while (!skb_queue_empty(&local_list));
+ }
+ }
+
+exit:
+ sg_free_table(&st);
+ while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
+ brcmu_pkt_buf_free_skb(pkt_next);
+
+ return ret;
}
int
@@ -355,21 +544,22 @@ int
brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt)
{
- uint incr_fix;
uint width;
int err = 0;
+ struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ err = brcmf_sdio_addrprep(sdiodev, width, &addr);
if (err)
goto done;
- incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
- err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
- fn, addr, pkt);
+ skb_queue_head_init(&pkt_list);
+ skb_queue_tail(&pkt_list, pkt);
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
+ skb_dequeue_tail(&pkt_list);
done:
return err;
@@ -386,13 +576,12 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
fn, addr, pktq->qlen);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ err = brcmf_sdio_addrprep(sdiodev, width, &addr);
if (err)
goto done;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
- err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
- pktq);
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
done:
return err;
@@ -424,37 +613,21 @@ int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt)
{
- uint incr_fix;
uint width;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
+ struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
- /* Async not implemented yet */
- if (flags & SDIO_REQ_ASYNC)
- return -ENOTSUPP;
-
- if (bar0 != sdiodev->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
- if (err)
- goto done;
-
- sdiodev->sbwad = bar0;
- }
-
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
-
- incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- if (width == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ brcmf_sdio_addrprep(sdiodev, width, &addr);
- err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
- addr, pkt);
+ skb_queue_head_init(&pkt_list);
+ skb_queue_tail(&pkt_list, pkt);
+ err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
+ skb_dequeue_tail(&pkt_list);
-done:
return err;
}
@@ -466,6 +639,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
struct sk_buff *pkt;
u32 sdaddr;
uint dsize;
+ struct sk_buff_head pkt_list;
dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
pkt = dev_alloc_skb(dsize);
@@ -474,6 +648,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
return -EIO;
}
pkt->priority = 0;
+ skb_queue_head_init(&pkt_list);
/* Determine initial transfer parameters */
sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@@ -501,9 +676,10 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
skb_put(pkt, dsize);
if (write)
memcpy(pkt->data, data, dsize);
- bcmerror = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
- write, SDIO_FUNC_1,
- sdaddr, pkt);
+ skb_queue_tail(&pkt_list, pkt);
+ bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
+ sdaddr, &pkt_list);
+ skb_dequeue_tail(&pkt_list);
if (bcmerror) {
brcmf_err("membytes transfer failed\n");
break;
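
For reference, the mmc_cmd.arg assembly in the brcmf_sdio_buffrw() hunk above follows the SD_IO_RW_EXTENDED (CMD53) argument layout. The helper below is a hypothetical, standalone sketch of that encoding (it is not part of this patch); the function name and the incr_addr parameter are illustrative only.

#include <linux/types.h>

/* Sketch of the CMD53 argument layout built inline in brcmf_sdio_buffrw():
 * bit 31 = R/W, bits 30:28 = function number, bit 27 = block mode,
 * bit 26 = incrementing address (OP code), bits 25:9 = register address,
 * bits 8:0 = block count (hence the 511-block cap).
 */
static u32 cmd53_block_arg(bool write, unsigned int fn, bool incr_addr,
			   u32 addr, unsigned int nblocks)
{
	u32 arg = 0;

	arg |= write ? 1U << 31 : 0;		/* direction: 1 = write */
	arg |= (fn & 0x7) << 28;		/* SDIO function number */
	arg |= 1U << 27;			/* block mode */
	arg |= incr_addr ? 1U << 26 : 0;	/* incrementing address */
	arg |= (addr & 0x1FFFF) << 9;		/* 17-bit register address */
	arg |= nblocks & 0x1FF;			/* block count, max 511 */

	return arg;
}

In the patch, write maps to the MMC_DATA_WRITE direction and the incrementing-address bit is set only for function 1, matching the inline comments in the hunk.
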
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 44fa0cdbf97b..289e386f01f6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -66,7 +66,7 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
-static bool
+bool
brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
{
bool is_err = false;
@@ -76,7 +76,7 @@ brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
return is_err;
}
-static void
+void
brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
{
#ifdef CONFIG_PM_SLEEP
@@ -211,115 +211,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
return err_ret;
}
-/* precondition: host controller is claimed */
-static int
-brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
- uint func, uint addr, struct sk_buff *pkt, uint pktlen)
-{
- int err_ret = 0;
-
- if ((write) && (!fifo)) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pkt->data)), pktlen);
- } else if (write) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pkt->data)), pktlen);
- } else if (fifo) {
- err_ret = sdio_readsb(sdiodev->func[func],
- ((u8 *) (pkt->data)), addr, pktlen);
- } else {
- err_ret = sdio_memcpy_fromio(sdiodev->func[func],
- ((u8 *) (pkt->data)),
- addr, pktlen);
- }
-
- return err_ret;
-}
-
-/*
- * This function takes a queue of packets. The packets on the queue
- * are assumed to be properly aligned by the caller.
- */
-int
-brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
- uint write, uint func, uint addr,
- struct sk_buff_head *pktq)
-{
- bool fifo = (fix_inc == SDIOH_DATA_FIX);
- u32 SGCount = 0;
- int err_ret = 0;
-
- struct sk_buff *pkt;
-
- brcmf_dbg(SDIO, "Enter\n");
-
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
- if (brcmf_pm_resume_error(sdiodev))
- return -EIO;
-
- skb_queue_walk(pktq, pkt) {
- uint pkt_len = pkt->len;
- pkt_len += 3;
- pkt_len &= 0xFFFFFFFC;
-
- err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
- addr, pkt, pkt_len);
- if (err_ret) {
- brcmf_err("%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
- write ? "TX" : "RX", pkt, SGCount, addr,
- pkt_len, err_ret);
- } else {
- brcmf_dbg(SDIO, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
- write ? "TX" : "RX", pkt, SGCount, addr,
- pkt_len);
- }
- if (!fifo)
- addr += pkt_len;
-
- SGCount++;
- }
-
- brcmf_dbg(SDIO, "Exit\n");
- return err_ret;
-}
-
-/*
- * This function takes a single DMA-able packet.
- */
-int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
- uint fix_inc, uint write, uint func, uint addr,
- struct sk_buff *pkt)
-{
- int status;
- uint pkt_len;
- bool fifo = (fix_inc == SDIOH_DATA_FIX);
-
- brcmf_dbg(SDIO, "Enter\n");
-
- if (pkt == NULL)
- return -EINVAL;
- pkt_len = pkt->len;
-
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
- if (brcmf_pm_resume_error(sdiodev))
- return -EIO;
-
- pkt_len += 3;
- pkt_len &= (uint)~3;
-
- status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
- addr, pkt, pkt_len);
- if (status) {
- brcmf_err("%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
- write ? "TX" : "RX", pkt, addr, pkt_len, status);
- } else {
- brcmf_dbg(SDIO, "%s xfr'd %p, addr=0x%05x, len=%d\n",
- write ? "TX" : "RX", pkt, addr, pkt_len);
- }
-
- return status;
-}
-
static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
{
/* read 24 bits and return valid 17 bit addr */
@@ -468,7 +359,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_byte_wait);
init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_chain_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n");
@@ -606,7 +496,8 @@ static int brcmf_sdio_pd_remove(struct platform_device *pdev)
static struct platform_driver brcmf_sdio_pd = {
.remove = brcmf_sdio_pd_remove,
.driver = {
- .name = BRCMFMAC_SDIO_PDATA_NAME
+ .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .owner = THIS_MODULE,
}
};
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 28db9cf39672..86cbfe2c7c6c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -583,6 +583,7 @@ enum brcmf_netif_stop_reason {
* @bssidx: index of bss associated with this interface.
* @mac_addr: assigned mac address.
* @netif_stop: bitmap indicates reason why netif queues are stopped.
+ * @netif_stop_lock: spinlock for updating netif_stop from multiple sources.
* @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
* @pend_8021x_wait: used for signalling change in count.
*/
@@ -598,6 +599,7 @@ struct brcmf_if {
s32 bssidx;
u8 mac_addr[ETH_ALEN];
u8 netif_stop;
+ spinlock_t netif_stop_lock;
atomic_t pend_8021x_cnt;
wait_queue_head_t pend_8021x_wait;
};
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index 59c77aa3b959..dd85401063cb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -30,6 +30,7 @@
#include "dhd_bus.h"
#include "fwsignal.h"
#include "dhd_dbg.h"
+#include "tracepoint.h"
struct brcmf_proto_cdc_dcmd {
__le32 cmd; /* dongle command value */
@@ -292,6 +293,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
h->flags2 = 0;
h->data_offset = offset;
BDC_SET_IF_IDX(h, ifidx);
+ trace_brcmf_bdchdr(pktbuf->data);
}
int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
@@ -309,6 +311,7 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
return -EBADE;
}
+ trace_brcmf_bdchdr(pktbuf->data);
h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
*ifidx = BDC_GET_IF_IDX(h);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 202869cd0932..c37b9d68e458 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -156,8 +156,11 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
"txs_suppr_core: %u\n"
"txs_suppr_ps: %u\n"
"txs_tossed: %u\n"
+ "txs_host_tossed: %u\n"
+ "bus_flow_block: %u\n"
+ "fws_flow_block: %u\n"
"send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n"
- "fifo_credits_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
+ "requested_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
fwstats->header_pulls,
fwstats->header_only_pkt,
fwstats->tlv_parse_failed,
@@ -176,14 +179,17 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
fwstats->txs_supp_core,
fwstats->txs_supp_ps,
fwstats->txs_tossed,
+ fwstats->txs_host_tossed,
+ fwstats->bus_flow_block,
+ fwstats->fws_flow_block,
fwstats->send_pkts[0], fwstats->send_pkts[1],
fwstats->send_pkts[2], fwstats->send_pkts[3],
fwstats->send_pkts[4],
- fwstats->fifo_credits_sent[0],
- fwstats->fifo_credits_sent[1],
- fwstats->fifo_credits_sent[2],
- fwstats->fifo_credits_sent[3],
- fwstats->fifo_credits_sent[4]);
+ fwstats->requested_sent[0],
+ fwstats->requested_sent[1],
+ fwstats->requested_sent[2],
+ fwstats->requested_sent[3],
+ fwstats->requested_sent[4]);
return simple_read_from_buffer(data, count, ppos, buf, res);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 009c87bfd9ae..0af1f5dc583a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -141,8 +141,7 @@ struct brcmf_fws_stats {
u32 header_pulls;
u32 pkt2bus;
u32 send_pkts[5];
- u32 fifo_credits_sent[5];
- u32 fifo_credits_back[6];
+ u32 requested_sent[5];
u32 generic_error;
u32 mac_update_failed;
u32 mac_ps_update_failed;
@@ -158,6 +157,9 @@ struct brcmf_fws_stats {
u32 txs_supp_core;
u32 txs_supp_ps;
u32 txs_tossed;
+ u32 txs_host_tossed;
+ u32 bus_flow_block;
+ u32 fws_flow_block;
};
struct brcmf_pub;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 2c593570497c..80099016d21f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -179,7 +179,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct brcmf_pub *drvr = ifp->drvr;
struct ethhdr *eh;
- brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+ brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
/* Can the device send data? */
if (drvr->bus_if->state != BRCMF_BUS_DATA) {
@@ -240,11 +240,15 @@ done:
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state)
{
- if (!ifp)
+ unsigned long flags;
+
+ if (!ifp || !ifp->ndev)
return;
brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
ifp->bssidx, ifp->netif_stop, reason, state);
+
+ spin_lock_irqsave(&ifp->netif_stop_lock, flags);
if (state) {
if (!ifp->netif_stop)
netif_stop_queue(ifp->ndev);
@@ -254,6 +258,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
if (!ifp->netif_stop)
netif_wake_queue(ifp->ndev);
}
+ spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
void brcmf_txflowblock(struct device *dev, bool state)
@@ -264,15 +269,18 @@ void brcmf_txflowblock(struct device *dev, bool state)
brcmf_dbg(TRACE, "Enter\n");
- for (i = 0; i < BRCMF_MAX_IFS; i++)
- brcmf_txflowblock_if(drvr->iflist[i],
- BRCMF_NETIF_STOP_REASON_BLOCK_BUS, state);
+ if (brcmf_fws_fc_active(drvr->fws)) {
+ brcmf_fws_bus_blocked(drvr, state);
+ } else {
+ for (i = 0; i < BRCMF_MAX_IFS; i++)
+ brcmf_txflowblock_if(drvr->iflist[i],
+ BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
+ state);
+ }
}
void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
{
- unsigned char *eth;
- uint len;
struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -280,7 +288,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
u8 ifidx;
int ret;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(DATA, "Enter\n");
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
@@ -296,33 +304,12 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
continue;
}
- /* Get the protocol, maintain skb around eth_type_trans()
- * The main reason for this hack is for the limitation of
- * Linux 2.4 where 'eth_type_trans' uses the
- * 'net->hard_header_len'
- * to perform skb_pull inside vs ETH_HLEN. Since to avoid
- * coping of the packet coming from the network stack to add
- * BDC, Hardware header etc, during network interface
- * registration
- * we set the 'net->hard_header_len' to ETH_HLEN + extra space
- * required
- * for BDC, Hardware header etc. and not just the ETH_HLEN
- */
- eth = skb->data;
- len = skb->len;
-
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
ifp->stats.multicast++;
- skb->data = eth;
- skb->len = len;
-
- /* Strip header, count, deliver upward */
- skb_pull(skb, ETH_HLEN);
-
/* Process special event packets */
brcmf_fweh_process_skb(drvr, skb);
@@ -338,10 +325,8 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
netif_rx(skb);
else
/* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service
- * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
- * by netif_rx_ni(), but in earlier kernels, we need
- * to do it manually.
+ * the softirqd must be woken explicitly to service the
+ * NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
*/
netif_rx_ni(skb);
}
@@ -630,7 +615,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
/* set appropriate operations */
ndev->netdev_ops = &brcmf_netdev_ops_pri;
- ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
+ ndev->hard_header_len += drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
drvr->rxsz = ndev->mtu + ndev->hard_header_len +
@@ -779,6 +764,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
ifp->bssidx = bssidx;
init_waitqueue_head(&ifp->pend_8021x_wait);
+ spin_lock_init(&ifp->netif_stop_lock);
if (mac_addr != NULL)
memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index d2487518bd2a..264111968320 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -448,8 +448,6 @@ struct brcmf_sdio {
uint rxblen; /* Allocated length of rxbuf */
u8 *rxctl; /* Aligned pointer into rxbuf */
u8 *rxctl_orig; /* pointer for freeing rxctl */
- u8 *databuf; /* Buffer for receiving big glom packet */
- u8 *dataptr; /* Aligned pointer into databuf */
uint rxlen; /* Length of valid data in buffer */
spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
@@ -473,8 +471,6 @@ struct brcmf_sdio {
s32 idletime; /* Control for activity timeout */
s32 idlecount; /* Activity timeout counter */
s32 idleclock; /* How to set bus driver when idle */
- s32 sd_rxchain;
- bool use_rxchain; /* If brcmf should use PKT chains */
bool rxflow_mode; /* Rx flow control mode */
bool rxflow; /* Is rx flow control on */
bool alp_only; /* Don't use HT clock (ALP only) */
@@ -495,8 +491,7 @@ struct brcmf_sdio {
struct workqueue_struct *brcmf_wq;
struct work_struct datawork;
- struct list_head dpc_tsklst;
- spinlock_t dpc_tl_lock;
+ atomic_t dpc_tskcnt;
const struct firmware *firmware;
u32 fw_ptr;
@@ -1026,29 +1021,6 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
-/* copy a buffer into a pkt buffer chain */
-static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
-{
- uint n, ret = 0;
- struct sk_buff *p;
- u8 *buf;
-
- buf = bus->dataptr;
-
- /* copy the data */
- skb_queue_walk(&bus->glom, p) {
- n = min_t(uint, p->len, len);
- memcpy(p->data, buf, n);
- buf += n;
- len -= n;
- ret += n;
- if (!len)
- break;
- }
-
- return ret;
-}
-
/* return total length of buffer chain */
static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
{
@@ -1202,8 +1174,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
int errcode;
u8 doff, sfdoff;
- bool usechain = bus->use_rxchain;
-
struct brcmf_sdio_read rd_new;
/* If packets, issue read(s) and send up packet chain */
@@ -1238,7 +1208,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (sublen % BRCMF_SDALIGN) {
brcmf_err("sublen %d not multiple of %d\n",
sublen, BRCMF_SDALIGN);
- usechain = false;
}
totlen += sublen;
@@ -1305,27 +1274,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
* packet and copy into the chain.
*/
sdio_claim_host(bus->sdiodev->func[1]);
- if (usechain) {
- errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
- bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, &bus->glom);
- } else if (bus->dataptr) {
- errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
- bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC,
- bus->dataptr, dlen);
- sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
- if (sublen != dlen) {
- brcmf_err("FAILED TO COPY, dlen %d sublen %d\n",
- dlen, sublen);
- errcode = -1;
- }
- pnext = NULL;
- } else {
- brcmf_err("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
- dlen);
- errcode = -1;
- }
+ errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
+ bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, &bus->glom);
sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2rxdata++;
@@ -2061,23 +2012,6 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
}
}
-static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
-{
- struct list_head *new_hd;
- unsigned long flags;
-
- if (in_interrupt())
- new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
- else
- new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
- if (new_hd == NULL)
- return;
-
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
- list_add_tail(new_hd, &bus->dpc_tsklst);
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-}
-
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
u8 idx;
@@ -2312,7 +2246,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
(!atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
data_ok(bus)) || PKT_AVAILABLE()) {
- brcmf_sdbrcm_adddpctsk(bus);
+ atomic_inc(&bus->dpc_tskcnt);
}
/* If we're done for now, turn off clock request. */
@@ -2342,7 +2276,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
- unsigned long flags;
brcmf_dbg(TRACE, "Enter\n");
@@ -2369,26 +2302,21 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
} else {
ret = 0;
}
- spin_unlock_bh(&bus->txqlock);
if (pktq_len(&bus->txq) >= TXHI) {
bus->txoff = true;
brcmf_txflowblock(bus->sdiodev->dev, true);
}
+ spin_unlock_bh(&bus->txqlock);
#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
qcount[prec] = pktq_plen(&bus->txq, prec);
#endif
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
- if (list_empty(&bus->dpc_tsklst)) {
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-
- brcmf_sdbrcm_adddpctsk(bus);
+ if (atomic_read(&bus->dpc_tskcnt) == 0) {
+ atomic_inc(&bus->dpc_tskcnt);
queue_work(bus->brcmf_wq, &bus->datawork);
- } else {
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
return ret;
@@ -2525,7 +2453,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
- unsigned long flags;
brcmf_dbg(TRACE, "Enter\n");
@@ -2612,18 +2539,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
} while (ret < 0 && retries++ < TXRETRIES);
}
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
- list_empty(&bus->dpc_tsklst)) {
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-
+ atomic_read(&bus->dpc_tskcnt) == 0) {
bus->activity = false;
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_dbg(INFO, "idle\n");
brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
sdio_release_host(bus->sdiodev->func[1]);
- } else {
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
if (ret)
@@ -3451,7 +3373,7 @@ void brcmf_sdbrcm_isr(void *arg)
if (!bus->intr)
brcmf_err("isr w/o interrupt configured!\n");
- brcmf_sdbrcm_adddpctsk(bus);
+ atomic_inc(&bus->dpc_tskcnt);
queue_work(bus->brcmf_wq, &bus->datawork);
}
@@ -3460,7 +3382,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
#ifdef DEBUG
struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
#endif /* DEBUG */
- unsigned long flags;
brcmf_dbg(TIMER, "Enter\n");
@@ -3476,11 +3397,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
if (!bus->intr ||
(bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
- if (list_empty(&bus->dpc_tsklst)) {
+ if (atomic_read(&bus->dpc_tskcnt) == 0) {
u8 devpend;
- spin_unlock_irqrestore(&bus->dpc_tl_lock,
- flags);
+
sdio_claim_host(bus->sdiodev->func[1]);
devpend = brcmf_sdio_regrb(bus->sdiodev,
SDIO_CCCR_INTx,
@@ -3489,9 +3408,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
intstatus =
devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
- } else {
- spin_unlock_irqrestore(&bus->dpc_tl_lock,
- flags);
}
/* If there is something, make like the ISR and
@@ -3500,7 +3416,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->sdcnt.pollcnt++;
atomic_set(&bus->ipend, 1);
- brcmf_sdbrcm_adddpctsk(bus);
+ atomic_inc(&bus->dpc_tskcnt);
queue_work(bus->brcmf_wq, &bus->datawork);
}
}
@@ -3545,41 +3461,15 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
return (atomic_read(&bus->ipend) > 0);
}
-static bool brcmf_sdbrcm_chipmatch(u16 chipid)
-{
- if (chipid == BCM43143_CHIP_ID)
- return true;
- if (chipid == BCM43241_CHIP_ID)
- return true;
- if (chipid == BCM4329_CHIP_ID)
- return true;
- if (chipid == BCM4330_CHIP_ID)
- return true;
- if (chipid == BCM4334_CHIP_ID)
- return true;
- if (chipid == BCM4335_CHIP_ID)
- return true;
- return false;
-}
-
static void brcmf_sdio_dataworker(struct work_struct *work)
{
struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
datawork);
- struct list_head *cur_hd, *tmp_hd;
- unsigned long flags;
-
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
- list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+ while (atomic_read(&bus->dpc_tskcnt)) {
brcmf_sdbrcm_dpc(bus);
-
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
- list_del(cur_hd);
- kfree(cur_hd);
+ atomic_dec(&bus->dpc_tskcnt);
}
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
@@ -3589,9 +3479,6 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
kfree(bus->rxbuf);
bus->rxctl = bus->rxbuf = NULL;
bus->rxlen = 0;
-
- kfree(bus->databuf);
- bus->databuf = NULL;
}
static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
@@ -3604,29 +3491,10 @@ static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
ALIGNMENT) + BRCMF_SDALIGN;
bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
if (!(bus->rxbuf))
- goto fail;
- }
-
- /* Allocate buffer to receive glomed packet */
- bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
- if (!(bus->databuf)) {
- /* release rxbuf which was already located as above */
- if (!bus->rxblen)
- kfree(bus->rxbuf);
- goto fail;
+ return false;
}
- /* Align the buffer */
- if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
- bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
- ((unsigned long)bus->databuf % BRCMF_SDALIGN));
- else
- bus->dataptr = bus->databuf;
-
return true;
-
-fail:
- return false;
}
static bool
@@ -3667,11 +3535,6 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
goto fail;
}
- if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
- brcmf_err("unsupported chip: 0x%04x\n", bus->ci->chip);
- goto fail;
- }
-
if (brcmf_sdbrcm_kso_init(bus)) {
brcmf_err("error enabling KSO\n");
goto fail;
@@ -3770,10 +3633,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize);
- /* bus module does not support packet chaining */
- bus->use_rxchain = false;
- bus->sd_rxchain = false;
-
/* SR state */
bus->sleeping = false;
bus->sr_enabled = false;
@@ -3927,8 +3786,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->watchdog_tsk = NULL;
}
/* Initialize DPC thread */
- INIT_LIST_HEAD(&bus->dpc_tsklst);
- spin_lock_init(&bus->dpc_tl_lock);
+ atomic_set(&bus->dpc_tskcnt, 0);
/* Assign bus interface call back */
bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
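
The dhd_sdio.c hunks above drop the spinlock-protected dpc_tsklst list in favour of a single atomic counter drained by the existing workqueue. A minimal sketch of that producer/consumer pattern follows; the names (dpc_pending, dpc_work, dpc_wq) are hypothetical and only illustrate the shape of the change, not driver code.

#include <linux/atomic.h>
#include <linux/workqueue.h>

static atomic_t dpc_pending = ATOMIC_INIT(0);	/* outstanding DPC requests */
static struct workqueue_struct *dpc_wq;
static struct work_struct dpc_work;

/* consumer: run the deferred handler once per recorded request */
static void dpc_worker(struct work_struct *work)
{
	while (atomic_read(&dpc_pending)) {
		/* brcmf_sdbrcm_dpc(bus) runs here in the real driver */
		atomic_dec(&dpc_pending);
	}
}

/* producer (ISR, watchdog or tx path): record a request, kick the worker */
static void dpc_kick(void)
{
	atomic_inc(&dpc_pending);
	queue_work(dpc_wq, &dpc_work);
}

Compared with the removed list, this keeps the same semantics (the worker loops until every recorded request has been serviced) without allocating a node per request or taking a spinlock from interrupt context.
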
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 6ec5db9c60a5..e679214b3c98 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -101,7 +101,8 @@ struct brcmf_event;
BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
- BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75)
+ BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
+ BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
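
The fweh.h change above extends an X-macro list: every BRCMF_ENUM_DEF(id, val) line expands into an enum value here and can be re-expanded elsewhere, for example to build matching name strings. A tiny standalone illustration of the technique follows (hypothetical names, plain user-space C, not taken from the driver):

#include <stdio.h>

#define EVENT_LIST \
	EVENT_DEF(FIFO_CREDIT_MAP, 74) \
	EVENT_DEF(ACTION_FRAME_RX, 75) \
	EVENT_DEF(BCMC_CREDIT_SUPPORT, 127)

/* first expansion: enum values */
#define EVENT_DEF(id, val) E_##id = (val),
enum events { EVENT_LIST };
#undef EVENT_DEF

/* second expansion: printable names, kept in list order */
#define EVENT_DEF(id, val) #id,
static const char *event_names[] = { EVENT_LIST };
#undef EVENT_DEF

int main(void)
{
	printf("%s = %d\n", event_names[0], E_FIFO_CREDIT_MAP);
	return 0;
}

Keeping the event ids and any derived tables in one list is what lets the BCMC_CREDIT_SUPPORT event be added with a single line.
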
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 5352dc1fdf3c..29b1f24c2d0f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -22,7 +22,6 @@
#include <linux/etherdevice.h>
#include <linux/err.h>
#include <linux/jiffies.h>
-#include <uapi/linux/nl80211.h>
#include <net/cfg80211.h>
#include <brcmu_utils.h>
@@ -142,7 +141,7 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
#define BRCMF_FWS_FLOWCONTROL_HIWATER 128
#define BRCMF_FWS_FLOWCONTROL_LOWATER 64
-#define BRCMF_FWS_PSQ_PREC_COUNT ((NL80211_NUM_ACS + 1) * 2)
+#define BRCMF_FWS_PSQ_PREC_COUNT ((BRCMF_FWS_FIFO_COUNT + 1) * 2)
#define BRCMF_FWS_PSQ_LEN 256
#define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01
@@ -157,11 +156,13 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
* @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver.
* @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue.
* @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware.
+ * @BRCMF_FWS_SKBSTATE_TIM: allocated for TIM update info.
*/
enum brcmf_fws_skb_state {
BRCMF_FWS_SKBSTATE_NEW,
BRCMF_FWS_SKBSTATE_DELAYED,
- BRCMF_FWS_SKBSTATE_SUPPRESSED
+ BRCMF_FWS_SKBSTATE_SUPPRESSED,
+ BRCMF_FWS_SKBSTATE_TIM
};
/**
@@ -193,9 +194,8 @@ struct brcmf_skbuff_cb {
* b[11] - packet sent upon firmware request.
* b[10] - packet only contains signalling data.
* b[9] - packet is a tx packet.
- * b[8] - packet uses FIFO credit (non-pspoll).
+ * b[8] - packet used a requested credit.
* b[7] - interface in AP mode.
- * b[6:4] - AC FIFO number.
* b[3:0] - interface index.
*/
#define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800
@@ -204,12 +204,10 @@ struct brcmf_skbuff_cb {
#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10
#define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200
#define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9
-#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK 0x0100
-#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_SHIFT 8
+#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_MASK 0x0100
+ /* Blocks per command are limited by the host's max block count, host
+ * transfer size and the IO_RW_EXTENDED maximum of 511 blocks.
#define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT 7
-#define BRCMF_SKB_IF_FLAGS_FIFO_MASK 0x0070
-#define BRCMF_SKB_IF_FLAGS_FIFO_SHIFT 4
#define BRCMF_SKB_IF_FLAGS_INDEX_MASK 0x000f
#define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT 0
@@ -246,7 +244,7 @@ struct brcmf_skbuff_cb {
#define BRCMF_SKB_HTOD_TAG_HSLOT_MASK 0x00ffff00
#define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT 8
#define BRCMF_SKB_HTOD_TAG_FREERUN_MASK 0x000000ff
-#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0
+#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0
#define brcmf_skb_htod_tag_set_field(skb, field, value) \
brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \
@@ -278,6 +276,7 @@ struct brcmf_skbuff_cb {
/**
* enum brcmf_fws_fifo - fifo indices used by dongle firmware.
*
+ * @BRCMF_FWS_FIFO_FIRST: first fifo, i.e. background.
* @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
* @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
* @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
@@ -287,7 +286,8 @@ struct brcmf_skbuff_cb {
* @BRCMF_FWS_FIFO_COUNT: number of fifos.
*/
enum brcmf_fws_fifo {
- BRCMF_FWS_FIFO_AC_BK,
+ BRCMF_FWS_FIFO_FIRST,
+ BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
BRCMF_FWS_FIFO_AC_BE,
BRCMF_FWS_FIFO_AC_VI,
BRCMF_FWS_FIFO_AC_VO,
@@ -307,12 +307,15 @@ enum brcmf_fws_fifo {
* firmware suppress the packet as device is already in PS mode.
* @BRCMF_FWS_TXSTATUS_FW_TOSSED:
* firmware tossed the packet.
+ * @BRCMF_FWS_TXSTATUS_HOST_TOSSED:
+ * host tossed the packet.
*/
enum brcmf_fws_txstatus {
BRCMF_FWS_TXSTATUS_DISCARD,
BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
- BRCMF_FWS_TXSTATUS_FW_TOSSED
+ BRCMF_FWS_TXSTATUS_FW_TOSSED,
+ BRCMF_FWS_TXSTATUS_HOST_TOSSED
};
enum brcmf_fws_fcmode {
@@ -343,6 +346,7 @@ enum brcmf_fws_mac_desc_state {
* @transit_count: packet in transit to firmware.
*/
struct brcmf_fws_mac_descriptor {
+ char name[16];
u8 occupied;
u8 mac_handle;
u8 interface_id;
@@ -356,7 +360,6 @@ struct brcmf_fws_mac_descriptor {
u8 seq[BRCMF_FWS_FIFO_COUNT];
struct pktq psq;
int transit_count;
- int suppress_count;
int suppr_transit_count;
bool send_tim_signal;
u8 traffic_pending_bmp;
@@ -383,12 +386,10 @@ enum brcmf_fws_hanger_item_state {
* struct brcmf_fws_hanger_item - single entry for tx pending packet.
*
* @state: entry is either free or occupied.
- * @gen: generation.
* @pkt: packet itself.
*/
struct brcmf_fws_hanger_item {
enum brcmf_fws_hanger_item_state state;
- u8 gen;
struct sk_buff *pkt;
};
@@ -424,6 +425,7 @@ struct brcmf_fws_info {
struct brcmf_fws_stats stats;
struct brcmf_fws_hanger hanger;
enum brcmf_fws_fcmode fcmode;
+ bool bcmc_credit_check;
struct brcmf_fws_macdesc_table desc;
struct workqueue_struct *fws_wq;
struct work_struct fws_dequeue_work;
@@ -434,6 +436,8 @@ struct brcmf_fws_info {
u32 fifo_credit_map;
u32 fifo_delay_map;
unsigned long borrow_defer_timestamp;
+ bool bus_flow_blocked;
+ bool creditmap_received;
};
/*
@@ -507,7 +511,6 @@ static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
{
int i;
- brcmf_dbg(TRACE, "enter\n");
memset(hanger, 0, sizeof(*hanger));
for (i = 0; i < ARRAY_SIZE(hanger->items); i++)
hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
@@ -517,7 +520,6 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
{
u32 i;
- brcmf_dbg(TRACE, "enter\n");
i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS;
while (i != h->slot_pos) {
@@ -533,14 +535,12 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
h->failed_slotfind++;
i = BRCMF_FWS_HANGER_MAXITEMS;
done:
- brcmf_dbg(TRACE, "exit: %d\n", i);
return i;
}
static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
- struct sk_buff *pkt, u32 slot_id)
+ struct sk_buff *pkt, u32 slot_id)
{
- brcmf_dbg(TRACE, "enter\n");
if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
return -ENOENT;
@@ -560,7 +560,6 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
u32 slot_id, struct sk_buff **pktout,
bool remove_item)
{
- brcmf_dbg(TRACE, "enter\n");
if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
return -ENOENT;
@@ -574,23 +573,18 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
if (remove_item) {
h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
h->items[slot_id].pkt = NULL;
- h->items[slot_id].gen = 0xff;
h->popped++;
}
return 0;
}
static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
- u32 slot_id, u8 gen)
+ u32 slot_id)
{
- brcmf_dbg(TRACE, "enter\n");
-
if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
return -ENOENT;
- h->items[slot_id].gen = gen;
-
- if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_INUSE) {
+ if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
brcmf_err("entry not in use\n");
return -EINVAL;
}
@@ -599,25 +593,6 @@ static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
return 0;
}
-static int brcmf_fws_hanger_get_genbit(struct brcmf_fws_hanger *hanger,
- struct sk_buff *pkt, u32 slot_id,
- int *gen)
-{
- brcmf_dbg(TRACE, "enter\n");
- *gen = 0xff;
-
- if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
- return -ENOENT;
-
- if (hanger->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
- brcmf_err("slot not in use\n");
- return -EINVAL;
- }
-
- *gen = hanger->items[slot_id].gen;
- return 0;
-}
-
static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
bool (*fn)(struct sk_buff *, void *),
int ifidx)
@@ -627,7 +602,6 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
int i;
enum brcmf_fws_hanger_item_state s;
- brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
for (i = 0; i < ARRAY_SIZE(h->items); i++) {
s = h->items[i].state;
if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE ||
@@ -644,14 +618,28 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
}
}
-static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc,
- u8 *addr, u8 ifidx)
+static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws,
+ struct brcmf_fws_mac_descriptor *desc)
+{
+ if (desc == &fws->desc.other)
+ strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name));
+ else if (desc->mac_handle)
+ scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
+ desc->mac_handle, desc->interface_id);
+ else
+ scnprintf(desc->name, sizeof(desc->name), "MACIF:%d",
+ desc->interface_id);
+}
+
+static void brcmf_fws_macdesc_init(struct brcmf_fws_mac_descriptor *desc,
+ u8 *addr, u8 ifidx)
{
brcmf_dbg(TRACE,
"enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx);
desc->occupied = 1;
desc->state = BRCMF_FWS_STATE_OPEN;
desc->requested_credit = 0;
+ desc->requested_packet = 0;
/* depending on use may need ifp->bssidx instead */
desc->interface_id = ifidx;
desc->ac_bitmap = 0xff; /* update this when handling APSD */
@@ -660,22 +648,22 @@ static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc,
}
static
-void brcmf_fws_clear_mac_descriptor(struct brcmf_fws_mac_descriptor *desc)
+void brcmf_fws_macdesc_deinit(struct brcmf_fws_mac_descriptor *desc)
{
brcmf_dbg(TRACE,
"enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id);
desc->occupied = 0;
desc->state = BRCMF_FWS_STATE_CLOSE;
desc->requested_credit = 0;
+ desc->requested_packet = 0;
}
static struct brcmf_fws_mac_descriptor *
-brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea)
+brcmf_fws_macdesc_lookup(struct brcmf_fws_info *fws, u8 *ea)
{
struct brcmf_fws_mac_descriptor *entry;
int i;
- brcmf_dbg(TRACE, "enter: ea=%pM\n", ea);
if (ea == NULL)
return ERR_PTR(-EINVAL);
@@ -690,42 +678,33 @@ brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea)
}
static struct brcmf_fws_mac_descriptor*
-brcmf_fws_find_mac_desc(struct brcmf_fws_info *fws, struct brcmf_if *ifp,
- u8 *da)
+brcmf_fws_macdesc_find(struct brcmf_fws_info *fws, struct brcmf_if *ifp, u8 *da)
{
struct brcmf_fws_mac_descriptor *entry = &fws->desc.other;
bool multicast;
- enum nl80211_iftype iftype;
-
- brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
multicast = is_multicast_ether_addr(da);
- iftype = brcmf_cfg80211_get_iftype(ifp);
- /* Multicast destination and P2P clients get the interface entry.
- * STA gets the interface entry if there is no exact match. For
- * example, TDLS destinations have their own entry.
+ /* Multicast destination, STA and P2P clients get the interface entry.
+ * STA/GC gets the MAC entry for TDLS destinations; TDLS destinations
+ * have their own entry.
*/
- entry = NULL;
- if ((multicast || iftype == NL80211_IFTYPE_STATION ||
- iftype == NL80211_IFTYPE_P2P_CLIENT) && ifp->fws_desc)
+ if (multicast && ifp->fws_desc) {
entry = ifp->fws_desc;
-
- if (entry != NULL && iftype != NL80211_IFTYPE_STATION)
goto done;
+ }
- entry = brcmf_fws_mac_descriptor_lookup(fws, da);
+ entry = brcmf_fws_macdesc_lookup(fws, da);
if (IS_ERR(entry))
- entry = &fws->desc.other;
+ entry = ifp->fws_desc;
done:
- brcmf_dbg(TRACE, "exit: entry=%p\n", entry);
return entry;
}
-static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws,
- struct brcmf_fws_mac_descriptor *entry,
- int fifo)
+static bool brcmf_fws_macdesc_closed(struct brcmf_fws_info *fws,
+ struct brcmf_fws_mac_descriptor *entry,
+ int fifo)
{
struct brcmf_fws_mac_descriptor *if_entry;
bool closed;
@@ -748,15 +727,11 @@ static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws,
return closed || !(entry->ac_bitmap & BIT(fifo));
}
-static void brcmf_fws_mac_desc_cleanup(struct brcmf_fws_info *fws,
- struct brcmf_fws_mac_descriptor *entry,
- int ifidx)
+static void brcmf_fws_macdesc_cleanup(struct brcmf_fws_info *fws,
+ struct brcmf_fws_mac_descriptor *entry,
+ int ifidx)
{
- brcmf_dbg(TRACE, "enter: entry=(ea=%pM, ifid=%d), ifidx=%d\n",
- entry->ea, entry->interface_id, ifidx);
if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) {
- brcmf_dbg(TRACE, "flush psq: ifidx=%d, qlen=%d\n",
- ifidx, entry->psq.len);
brcmf_fws_psq_flush(fws, &entry->psq, ifidx);
entry->occupied = !!(entry->psq.len);
}
@@ -772,7 +747,6 @@ static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws,
int prec;
u32 hslot;
- brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
txq = brcmf_bus_gettxq(fws->drvr->bus_if);
if (IS_ERR(txq)) {
brcmf_dbg(TRACE, "no txq to clean up\n");
@@ -798,7 +772,6 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
struct brcmf_fws_mac_descriptor *table;
bool (*matchfn)(struct sk_buff *, void *) = NULL;
- brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
if (fws == NULL)
return;
@@ -808,51 +781,121 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
/* cleanup individual nodes */
table = &fws->desc.nodes[0];
for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++)
- brcmf_fws_mac_desc_cleanup(fws, &table[i], ifidx);
+ brcmf_fws_macdesc_cleanup(fws, &table[i], ifidx);
- brcmf_fws_mac_desc_cleanup(fws, &fws->desc.other, ifidx);
+ brcmf_fws_macdesc_cleanup(fws, &fws->desc.other, ifidx);
brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx);
brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
}
-static void brcmf_fws_tim_update(struct brcmf_fws_info *ctx,
- struct brcmf_fws_mac_descriptor *entry,
- int prec)
+static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
- brcmf_dbg(TRACE, "enter: ea=%pM\n", entry->ea);
- if (entry->state == BRCMF_FWS_STATE_CLOSE) {
- /* check delayedQ and suppressQ in one call using bitmap */
- if (brcmu_pktq_mlen(&entry->psq, 3 << (prec * 2)) == 0)
- entry->traffic_pending_bmp =
- entry->traffic_pending_bmp & ~NBITVAL(prec);
- else
- entry->traffic_pending_bmp =
- entry->traffic_pending_bmp | NBITVAL(prec);
+ struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
+ u8 *wlh;
+ u16 data_offset = 0;
+ u8 fillers;
+ __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
+
+ brcmf_dbg(TRACE, "enter: %s, idx=%d pkttag=0x%08X, hslot=%d\n",
+ entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
+ le32_to_cpu(pkttag), (le32_to_cpu(pkttag) >> 8) & 0xffff);
+ if (entry->send_tim_signal)
+ data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
+
+ /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+ data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
+ fillers = round_up(data_offset, 4) - data_offset;
+ data_offset += fillers;
+
+ skb_push(skb, data_offset);
+ wlh = skb->data;
+
+ wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
+ wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
+ memcpy(&wlh[2], &pkttag, sizeof(pkttag));
+ wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
+
+ if (entry->send_tim_signal) {
+ entry->send_tim_signal = 0;
+ wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
+ wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
+ wlh[2] = entry->mac_handle;
+ wlh[3] = entry->traffic_pending_bmp;
+ brcmf_dbg(TRACE, "adding TIM info: handle %d bmp 0x%X\n",
+ entry->mac_handle, entry->traffic_pending_bmp);
+ wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
+ entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
}
- /* request a TIM update to firmware at the next piggyback opportunity */
+ if (fillers)
+ memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
+
+ brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
+ data_offset >> 2, skb);
+ return 0;
+}
+
+static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
+ struct brcmf_fws_mac_descriptor *entry,
+ int fifo, bool send_immediately)
+{
+ struct sk_buff *skb;
+ struct brcmf_bus *bus;
+ struct brcmf_skbuff_cb *skcb;
+ s32 err;
+ u32 len;
+
+ /* check delayedQ and suppressQ in one call using bitmap */
+ if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
+ entry->traffic_pending_bmp &= ~NBITVAL(fifo);
+ else
+ entry->traffic_pending_bmp |= NBITVAL(fifo);
+
+ entry->send_tim_signal = false;
if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp)
entry->send_tim_signal = true;
+ if (send_immediately && entry->send_tim_signal &&
+ entry->state == BRCMF_FWS_STATE_CLOSE) {
+ /* create a dummy packet and send that. The traffic */
+ /* bitmap info will automatically be attached to that packet */
+ len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 +
+ BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
+ 4 + fws->drvr->hdrlen;
+ skb = brcmu_pkt_buf_get_skb(len);
+ if (skb == NULL)
+ return false;
+ skb_pull(skb, len);
+ skcb = brcmf_skbcb(skb);
+ skcb->mac = entry;
+ skcb->state = BRCMF_FWS_SKBSTATE_TIM;
+ bus = fws->drvr->bus_if;
+ err = brcmf_fws_hdrpush(fws, skb);
+ if (err == 0)
+ err = brcmf_bus_txdata(bus, skb);
+ if (err)
+ brcmu_pkt_buf_free_skb(skb);
+ return true;
+ }
+ return false;
}
static void
brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
u8 if_id)
{
- struct brcmf_if *ifp = fws->drvr->iflist[if_id];
+ struct brcmf_if *ifp = fws->drvr->iflist[!if_id ? 0 : if_id + 1];
if (WARN_ON(!ifp))
return;
- brcmf_dbg(TRACE,
- "enter: bssidx=%d, ifidx=%d\n", ifp->bssidx, ifp->ifidx);
-
if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER)
brcmf_txflowblock_if(ifp,
BRCMF_NETIF_STOP_REASON_FWS_FC, false);
if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
- pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER)
+ pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) {
+ fws->stats.fws_flow_block++;
brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true);
+ }
return;
}
@@ -862,10 +905,26 @@ static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
return 0;
}
+/* using macro so sparse checking does not complain
+ * about locking imbalance.
+ */
+#define brcmf_fws_lock(drvr, flags) \
+do { \
+ flags = 0; \
+ spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
+} while (0)
+
+/* using macro so sparse checking does not complain
+ * about locking imbalance.
+ */
+#define brcmf_fws_unlock(drvr, flags) \
+ spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
+
static
int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry, *existing;
+ ulong flags;
u8 mac_handle;
u8 ifidx;
u8 *addr;
@@ -876,34 +935,44 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
entry = &fws->desc.nodes[mac_handle & 0x1F];
if (type == BRCMF_FWS_TYPE_MACDESC_DEL) {
- brcmf_dbg(TRACE, "deleting mac %pM idx %d\n", addr, ifidx);
if (entry->occupied) {
- brcmf_fws_mac_desc_cleanup(fws, entry, -1);
- brcmf_fws_clear_mac_descriptor(entry);
+ brcmf_dbg(TRACE, "deleting %s mac %pM\n",
+ entry->name, addr);
+ brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_macdesc_cleanup(fws, entry, -1);
+ brcmf_fws_macdesc_deinit(entry);
+ brcmf_fws_unlock(fws->drvr, flags);
} else
fws->stats.mac_update_failed++;
return 0;
}
- brcmf_dbg(TRACE,
- "add mac %pM handle %u idx %d\n", addr, mac_handle, ifidx);
- existing = brcmf_fws_mac_descriptor_lookup(fws, addr);
+ existing = brcmf_fws_macdesc_lookup(fws, addr);
if (IS_ERR(existing)) {
if (!entry->occupied) {
+ brcmf_fws_lock(fws->drvr, flags);
entry->mac_handle = mac_handle;
- brcmf_fws_init_mac_descriptor(entry, addr, ifidx);
+ brcmf_fws_macdesc_init(entry, addr, ifidx);
+ brcmf_fws_macdesc_set_name(fws, entry);
brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
+ brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
} else {
fws->stats.mac_update_failed++;
}
} else {
if (entry != existing) {
- brcmf_dbg(TRACE, "relocate mac\n");
+ brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
+ brcmf_fws_lock(fws->drvr, flags);
memcpy(entry, existing,
offsetof(struct brcmf_fws_mac_descriptor, psq));
entry->mac_handle = mac_handle;
- brcmf_fws_clear_mac_descriptor(existing);
+ brcmf_fws_macdesc_deinit(existing);
+ brcmf_fws_macdesc_set_name(fws, entry);
+ brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
+ addr);
} else {
brcmf_dbg(TRACE, "use existing\n");
WARN_ON(entry->mac_handle != mac_handle);
@@ -917,8 +986,9 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
+ ulong flags;
u8 mac_handle;
- int i;
+ int ret;
mac_handle = data[0];
entry = &fws->desc.nodes[mac_handle & 0x1F];
@@ -926,30 +996,35 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
fws->stats.mac_ps_update_failed++;
return -ESRCH;
}
-
- /* a state update should wipe old credits? */
+ brcmf_fws_lock(fws->drvr, flags);
+ /* a state update should wipe old credits */
entry->requested_credit = 0;
+ entry->requested_packet = 0;
if (type == BRCMF_FWS_TYPE_MAC_OPEN) {
entry->state = BRCMF_FWS_STATE_OPEN;
- return BRCMF_FWS_RET_OK_SCHEDULE;
+ ret = BRCMF_FWS_RET_OK_SCHEDULE;
} else {
entry->state = BRCMF_FWS_STATE_CLOSE;
- for (i = BRCMF_FWS_FIFO_AC_BE; i < NL80211_NUM_ACS; i++)
- brcmf_fws_tim_update(fws, entry, i);
+ brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false);
+ brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false);
+ brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false);
+ brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
+ ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
}
- return BRCMF_FWS_RET_OK_NOSCHEDULE;
+ brcmf_fws_unlock(fws->drvr, flags);
+ return ret;
}
static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
+ ulong flags;
u8 ifidx;
int ret;
ifidx = data[0];
- brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
if (ifidx >= BRCMF_MAX_IFS) {
ret = -ERANGE;
goto fail;
@@ -961,17 +1036,26 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
goto fail;
}
+ brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
+ entry->name);
+ brcmf_fws_lock(fws->drvr, flags);
switch (type) {
case BRCMF_FWS_TYPE_INTERFACE_OPEN:
entry->state = BRCMF_FWS_STATE_OPEN;
- return BRCMF_FWS_RET_OK_SCHEDULE;
+ ret = BRCMF_FWS_RET_OK_SCHEDULE;
+ break;
case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
entry->state = BRCMF_FWS_STATE_CLOSE;
- return BRCMF_FWS_RET_OK_NOSCHEDULE;
+ ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
+ break;
default:
ret = -EINVAL;
- break;
+ brcmf_fws_unlock(fws->drvr, flags);
+ goto fail;
}
+ brcmf_fws_unlock(fws->drvr, flags);
+ return ret;
+
fail:
fws->stats.if_update_failed++;
return ret;
@@ -981,6 +1065,7 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
+ ulong flags;
entry = &fws->desc.nodes[data[1] & 0x1F];
if (!entry->occupied) {
@@ -991,15 +1076,51 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
return -ESRCH;
}
+ brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
+ brcmf_fws_get_tlv_name(type), type, entry->name,
+ data[0], data[2]);
+ brcmf_fws_lock(fws->drvr, flags);
if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
entry->requested_credit = data[0];
else
entry->requested_packet = data[0];
entry->ac_bitmap = data[2];
+ brcmf_fws_unlock(fws->drvr, flags);
return BRCMF_FWS_RET_OK_SCHEDULE;
}
+static void
+brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry,
+ struct sk_buff *skb)
+{
+ if (entry->requested_credit > 0) {
+ entry->requested_credit--;
+ brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
+ brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 1);
+ if (entry->state != BRCMF_FWS_STATE_CLOSE)
+ brcmf_err("requested credit set while mac not closed!\n");
+ } else if (entry->requested_packet > 0) {
+ entry->requested_packet--;
+ brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
+ brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
+ if (entry->state != BRCMF_FWS_STATE_CLOSE)
+ brcmf_err("requested packet set while mac not closed!\n");
+ } else {
+ brcmf_skb_if_flags_set_field(skb, REQUESTED, 0);
+ brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
+ }
+}
+
+static void brcmf_fws_macdesc_return_req_credit(struct sk_buff *skb)
+{
+ struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
+
+ if ((brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) &&
+ (entry->state == BRCMF_FWS_STATE_CLOSE))
+ entry->requested_credit++;
+}
+
static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
u8 fifo, u8 credits)
{
@@ -1010,6 +1131,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
if (!credits)
return;
+ fws->fifo_credit_map |= 1 << fifo;
+
if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
(fws->credits_borrowed[0])) {
for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
@@ -1031,7 +1154,6 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
}
}
- fws->fifo_credit_map |= 1 << fifo;
fws->fifo_credit[fifo] += credits;
}
@@ -1042,27 +1164,6 @@ static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
queue_work(fws->fws_wq, &fws->fws_dequeue_work);
}
-static void brcmf_skb_pick_up_credit(struct brcmf_fws_info *fws, int fifo,
- struct sk_buff *p)
-{
- struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(p)->mac;
-
- if (brcmf_skbcb(p)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
- if (fws->fcmode != BRCMF_FWS_FCMODE_IMPLIED_CREDIT)
- return;
- brcmf_fws_return_credits(fws, fifo, 1);
- } else {
- /*
- * if this packet did not count against FIFO credit, it
- * must have taken a requested_credit from the destination
- * entry (for pspoll etc.)
- */
- if (!brcmf_skb_if_flags_get_field(p, REQUESTED))
- entry->requested_credit++;
- }
- brcmf_fws_schedule_deq(fws);
-}
-
static int brcmf_fws_enq(struct brcmf_fws_info *fws,
enum brcmf_fws_skb_state state, int fifo,
struct sk_buff *p)
@@ -1078,7 +1179,7 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
return -ENOENT;
}
- brcmf_dbg(TRACE, "enter: ea=%pM, qlen=%d\n", entry->ea, entry->psq.len);
+ brcmf_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p);
if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
prec += 1;
qfull_stat = &fws->stats.supprq_full_error;
@@ -1095,14 +1196,12 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
/* update the sk_buff state */
brcmf_skbcb(p)->state = state;
- if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
- entry->suppress_count++;
/*
* A packet has been pushed so update traffic
* availability bitmap, if applicable
*/
- brcmf_fws_tim_update(fws, entry, fifo);
+ brcmf_fws_tim_update(fws, entry, fifo, true);
brcmf_fws_flow_control_check(fws, &entry->psq,
brcmf_skb_if_flags_get_field(p, INDEX));
return 0;
@@ -1113,7 +1212,6 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
struct brcmf_fws_mac_descriptor *table;
struct brcmf_fws_mac_descriptor *entry;
struct sk_buff *p;
- int use_credit = 1;
int num_nodes;
int node_pos;
int prec_out;
@@ -1127,7 +1225,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
for (i = 0; i < num_nodes; i++) {
entry = &table[(node_pos + i) % num_nodes];
if (!entry->occupied ||
- brcmf_fws_mac_desc_closed(fws, entry, fifo))
+ brcmf_fws_macdesc_closed(fws, entry, fifo))
continue;
if (entry->suppressed)
@@ -1137,9 +1235,8 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
if (p == NULL) {
if (entry->suppressed) {
- if (entry->suppr_transit_count >
- entry->suppress_count)
- return NULL;
+ if (entry->suppr_transit_count)
+ continue;
entry->suppressed = false;
p = brcmu_pktq_mdeq(&entry->psq,
1 << (fifo * 2), &prec_out);
@@ -1148,26 +1245,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
if (p == NULL)
continue;
- /* did the packet come from suppress sub-queue? */
- if (entry->requested_credit > 0) {
- entry->requested_credit--;
- /*
- * if the packet was pulled out while destination is in
- * closed state but had a non-zero packets requested,
- * then this should not count against the FIFO credit.
- * That is due to the fact that the firmware will
- * most likely hold onto this packet until a suitable
- * time later to push it to the appropriate AC FIFO.
- */
- if (entry->state == BRCMF_FWS_STATE_CLOSE)
- use_credit = 0;
- } else if (entry->requested_packet > 0) {
- entry->requested_packet--;
- brcmf_skb_if_flags_set_field(p, REQUESTED, 1);
- if (entry->state == BRCMF_FWS_STATE_CLOSE)
- use_credit = 0;
- }
- brcmf_skb_if_flags_set_field(p, CREDITCHECK, use_credit);
+ brcmf_fws_macdesc_use_req_credit(entry, p);
/* move dequeue position to ensure fair round-robin */
fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes;
@@ -1179,7 +1257,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
* A packet has been picked up, update traffic
* availability bitmap, if applicable
*/
- brcmf_fws_tim_update(fws, entry, fifo);
+ brcmf_fws_tim_update(fws, entry, fifo, false);
/*
* decrement total enqueued fifo packets and
@@ -1192,7 +1270,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
}
p = NULL;
done:
- brcmf_dbg(TRACE, "exit: fifo %d skb %p\n", fifo, p);
+ brcmf_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p);
return p;
}
@@ -1202,22 +1280,26 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u32 hslot;
int ret;
+ u8 ifidx;
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
/* this packet was suppressed */
- if (!entry->suppressed || entry->generation != genbit) {
+ if (!entry->suppressed) {
entry->suppressed = true;
- entry->suppress_count = brcmu_pktq_mlen(&entry->psq,
- 1 << (fifo * 2 + 1));
entry->suppr_transit_count = entry->transit_count;
+ brcmf_dbg(DATA, "suppress %s: transit %d\n",
+ entry->name, entry->transit_count);
}
entry->generation = genbit;
- ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
+ ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
+ if (ret == 0)
+ ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
+ skb);
if (ret != 0) {
- /* suppress q is full, drop this packet */
+ /* suppress q is full or hdrpull failed, drop this packet */
brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
true);
} else {
@@ -1225,26 +1307,24 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
* Mark suppressed to avoid a double free during
* wlfc cleanup
*/
- brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot,
- genbit);
- entry->suppress_count++;
+ brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
}
return ret;
}
static int
-brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
+brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
u32 genbit)
{
u32 fifo;
int ret;
bool remove_from_hanger = true;
struct sk_buff *skb;
+ struct brcmf_skbuff_cb *skcb;
struct brcmf_fws_mac_descriptor *entry = NULL;
- brcmf_dbg(TRACE, "status: flags=0x%X, hslot=%d\n",
- flags, hslot);
+ brcmf_dbg(DATA, "flags %d\n", flags);
if (flags == BRCMF_FWS_TXSTATUS_DISCARD)
fws->stats.txs_discard++;
@@ -1256,6 +1336,8 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
remove_from_hanger = false;
} else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
fws->stats.txs_tossed++;
+ else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)
+ fws->stats.txs_host_tossed++;
else
brcmf_err("unexpected txstatus\n");
@@ -1266,32 +1348,42 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
return ret;
}
- entry = brcmf_skbcb(skb)->mac;
+ skcb = brcmf_skbcb(skb);
+ entry = skcb->mac;
if (WARN_ON(!entry)) {
brcmu_pkt_buf_free_skb(skb);
return -EINVAL;
}
+ entry->transit_count--;
+ if (entry->suppressed && entry->suppr_transit_count)
+ entry->suppr_transit_count--;
+
+ brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags,
+ skcb->htod);
/* pick up the implicit credit from this packet */
fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
- brcmf_skb_pick_up_credit(fws, fifo, skb);
+ if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) ||
+ (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
+ (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) {
+ brcmf_fws_return_credits(fws, fifo, 1);
+ brcmf_fws_schedule_deq(fws);
+ }
+ brcmf_fws_macdesc_return_req_credit(skb);
if (!remove_from_hanger)
ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit);
- if (remove_from_hanger || ret) {
- entry->transit_count--;
- if (entry->suppressed)
- entry->suppr_transit_count--;
-
+ if (remove_from_hanger || ret)
brcmf_txfinalize(fws->drvr, skb, true);
- }
+
return 0;
}
static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
u8 *data)
{
+ ulong flags;
int i;
if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
@@ -1299,17 +1391,20 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
- brcmf_dbg(TRACE, "enter: data %pM\n", data);
+ brcmf_dbg(DATA, "enter: data %pM\n", data);
+ brcmf_fws_lock(fws->drvr, flags);
for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
brcmf_fws_return_credits(fws, i, data[i]);
- brcmf_dbg(INFO, "map: credit %x delay %x\n", fws->fifo_credit_map,
+ brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
fws->fifo_delay_map);
+ brcmf_fws_unlock(fws->drvr, flags);
return BRCMF_FWS_RET_OK_SCHEDULE;
}
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
{
+ ulong lflags;
__le32 status_le;
u32 status;
u32 hslot;
@@ -1323,7 +1418,10 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
hslot = brcmf_txstatus_get_field(status, HSLOT);
genbit = brcmf_txstatus_get_field(status, GENERATION);
- return brcmf_fws_txstatus_process(fws, flags, hslot, genbit);
+ brcmf_fws_lock(fws->drvr, lflags);
+ brcmf_fws_txs_process(fws, flags, hslot, genbit);
+ brcmf_fws_unlock(fws->drvr, lflags);
+ return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
@@ -1331,26 +1429,11 @@ static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
__le32 timestamp;
memcpy(&timestamp, &data[2], sizeof(timestamp));
- brcmf_dbg(INFO, "received: seq %d, timestamp %d\n", data[1],
+ brcmf_dbg(CTL, "received: seq %d, timestamp %d\n", data[1],
le32_to_cpu(timestamp));
return 0;
}
-/* using macro so sparse checking does not complain
- * about locking imbalance.
- */
-#define brcmf_fws_lock(drvr, flags) \
-do { \
- flags = 0; \
- spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
-} while (0)
-
-/* using macro so sparse checking does not complain
- * about locking imbalance.
- */
-#define brcmf_fws_unlock(drvr, flags) \
- spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
-
static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data)
@@ -1364,6 +1447,10 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
brcmf_err("event payload too small (%d)\n", e->datalen);
return -EINVAL;
}
+ if (fws->creditmap_received)
+ return 0;
+
+ fws->creditmap_received = true;
brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
brcmf_fws_lock(ifp->drvr, flags);
@@ -1379,11 +1466,24 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
return 0;
}
+static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_fws_info *fws = ifp->drvr->fws;
+ ulong flags;
+
+ brcmf_fws_lock(ifp->drvr, flags);
+ if (fws)
+ fws->bcmc_credit_check = true;
+ brcmf_fws_unlock(ifp->drvr, flags);
+ return 0;
+}
+
int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
struct sk_buff *skb)
{
struct brcmf_fws_info *fws = drvr->fws;
- ulong flags;
u8 *signal_data;
s16 data_len;
u8 type;
@@ -1392,7 +1492,7 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
s32 status;
s32 err;
- brcmf_dbg(TRACE, "enter: ifidx %d, skblen %u, sig %d\n",
+ brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n",
ifidx, skb->len, signal_len);
WARN_ON(signal_len > skb->len);
@@ -1403,9 +1503,6 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
return 0;
}
- /* lock during tlv parsing */
- brcmf_fws_lock(drvr, flags);
-
fws->stats.header_pulls++;
data_len = signal_len;
signal_data = skb->data;
@@ -1426,14 +1523,15 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
len = signal_data[1];
data = signal_data + 2;
- brcmf_dbg(INFO, "tlv type=%d (%s), len=%d, data[0]=%d\n", type,
- brcmf_fws_get_tlv_name(type), len, *data);
+ brcmf_dbg(HDRS, "tlv type=%s (%d), len=%d (%d)\n",
+ brcmf_fws_get_tlv_name(type), type, len,
+ brcmf_fws_get_tlv_len(fws, type));
/* abort parsing when length invalid */
if (data_len < len + 2)
break;
- if (len != brcmf_fws_get_tlv_len(fws, type))
+ if (len < brcmf_fws_get_tlv_len(fws, type))
break;
err = BRCMF_FWS_RET_OK_NOSCHEDULE;
@@ -1498,203 +1596,74 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
if (skb->len == 0)
fws->stats.header_only_pkt++;
- brcmf_fws_unlock(drvr, flags);
- return 0;
-}
-
-static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
-{
- struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
- u8 *wlh;
- u16 data_offset = 0;
- u8 fillers;
- __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
-
- brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u, pkttag=0x%08X\n",
- entry->ea, entry->interface_id, le32_to_cpu(pkttag));
- if (entry->send_tim_signal)
- data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
-
- /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
- data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
- fillers = round_up(data_offset, 4) - data_offset;
- data_offset += fillers;
-
- skb_push(skb, data_offset);
- wlh = skb->data;
-
- wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
- wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
- memcpy(&wlh[2], &pkttag, sizeof(pkttag));
- wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
-
- if (entry->send_tim_signal) {
- entry->send_tim_signal = 0;
- wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
- wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
- wlh[2] = entry->mac_handle;
- wlh[3] = entry->traffic_pending_bmp;
- wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
- entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
- }
- if (fillers)
- memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
-
- brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
- data_offset >> 2, skb);
return 0;
}
-static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
+static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *p)
{
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
struct brcmf_fws_mac_descriptor *entry = skcb->mac;
- int rc = 0;
- bool header_needed;
- int hslot = BRCMF_FWS_HANGER_MAXITEMS;
- u8 free_ctr;
- u8 ifidx;
u8 flags;
- header_needed = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED;
-
- if (header_needed) {
- /* obtaining free slot may fail, but that will be caught
- * by the hanger push. This assures the packet has a BDC
- * header upon return.
- */
- hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
- free_ctr = entry->seq[fifo];
- brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
- brcmf_skb_htod_tag_set_field(p, FREERUN, free_ctr);
- brcmf_skb_htod_tag_set_field(p, GENERATION, 1);
- entry->transit_count++;
- }
brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
- brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
-
+ brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
- if (!(skcb->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)) {
+ if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
/*
- Indicate that this packet is being sent in response to an
- explicit request from the firmware side.
- */
+ * Indicate that this packet is being sent in response to an
+ * explicit request from the firmware side.
+ */
flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
}
brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
- if (header_needed) {
- brcmf_fws_hdrpush(fws, p);
- rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
- if (rc)
- brcmf_err("hanger push failed: rc=%d\n", rc);
- } else {
- int gen;
-
- /* remove old header */
- rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, p);
- if (rc == 0) {
- hslot = brcmf_skb_htod_tag_get_field(p, HSLOT);
- brcmf_fws_hanger_get_genbit(&fws->hanger, p,
- hslot, &gen);
- brcmf_skb_htod_tag_set_field(p, GENERATION, gen);
-
- /* push new header */
- brcmf_fws_hdrpush(fws, p);
- }
- }
-
- return rc;
+ brcmf_fws_hdrpush(fws, p);
}
-static void
-brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb)
+static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
+ struct sk_buff *skb, int fifo)
{
- /*
- put the packet back to the head of queue
-
- - suppressed packet goes back to suppress sub-queue
- - pull out the header, if new or delayed packet
-
- Note: hslot is used only when header removal is done.
- */
struct brcmf_fws_mac_descriptor *entry;
- enum brcmf_fws_skb_state state;
struct sk_buff *pktout;
+ int qidx, hslot;
int rc = 0;
- int fifo;
- int hslot;
- u8 ifidx;
- fifo = brcmf_skb_if_flags_get_field(skb, FIFO);
- state = brcmf_skbcb(skb)->state;
entry = brcmf_skbcb(skb)->mac;
-
- if (entry != NULL) {
- if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
- /* wl-header is saved for suppressed packets */
- pktout = brcmu_pktq_penq_head(&entry->psq, 2 * fifo + 1,
- skb);
- if (pktout == NULL) {
- brcmf_err("suppress queue full\n");
- rc = -ENOSPC;
- }
- } else {
- hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
-
- /* remove header first */
- rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
- if (rc) {
- brcmf_err("header removal failed\n");
- /* free the hanger slot */
- brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
- &pktout, true);
- rc = -EINVAL;
- goto fail;
- }
-
- /* delay-q packets are going to delay-q */
- pktout = brcmu_pktq_penq_head(&entry->psq,
- 2 * fifo, skb);
- if (pktout == NULL) {
- brcmf_err("delay queue full\n");
- rc = -ENOSPC;
- }
-
- /* free the hanger slot */
- brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &pktout,
- true);
-
- /* decrement sequence count */
- entry->seq[fifo]--;
+ if (entry->occupied) {
+ qidx = 2 * fifo;
+ if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
+ qidx++;
+
+ pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb);
+ if (pktout == NULL) {
+ brcmf_err("%s queue %d full\n", entry->name, qidx);
+ rc = -ENOSPC;
}
- /*
- if this packet did not count against FIFO credit, it must have
- taken a requested_credit from the firmware (for pspoll etc.)
- */
- if (!(brcmf_skbcb(skb)->if_flags &
- BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK))
- entry->requested_credit++;
} else {
- brcmf_err("no mac entry linked\n");
+ brcmf_err("%s entry removed\n", entry->name);
rc = -ENOENT;
}
-
-fail:
if (rc) {
- brcmf_txfinalize(fws->drvr, skb, false);
fws->stats.rollback_failed++;
- } else
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+ brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
+ hslot, 0);
+ } else {
fws->stats.rollback_success++;
+ brcmf_fws_return_credits(fws, fifo, 1);
+ brcmf_fws_macdesc_return_req_credit(skb);
+ }
}
static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
{
int lender_ac;
- if (time_after(fws->borrow_defer_timestamp, jiffies))
+ if (time_after(fws->borrow_defer_timestamp, jiffies)) {
+ fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
return -ENAVAIL;
+ }
for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
if (fws->fifo_credit[lender_ac]) {
@@ -1702,66 +1671,15 @@ static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
fws->fifo_credit[lender_ac]--;
if (fws->fifo_credit[lender_ac] == 0)
fws->fifo_credit_map &= ~(1 << lender_ac);
- brcmf_dbg(TRACE, "borrow credit from: %d\n", lender_ac);
+ fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE);
+ brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac);
return 0;
}
}
+ fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
return -ENAVAIL;
}
-static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo,
- struct sk_buff *skb)
-{
- struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
- int *credit = &fws->fifo_credit[fifo];
- int use_credit = 1;
-
- brcmf_dbg(TRACE, "enter: ac=%d, credits=%d\n", fifo, *credit);
-
- if (entry->requested_credit > 0) {
- /*
- * if the packet was pulled out while destination is in
- * closed state but had a non-zero packets requested,
- * then this should not count against the FIFO credit.
- * That is due to the fact that the firmware will
- * most likely hold onto this packet until a suitable
- * time later to push it to the appropriate AC FIFO.
- */
- entry->requested_credit--;
- if (entry->state == BRCMF_FWS_STATE_CLOSE)
- use_credit = 0;
- } else if (entry->requested_packet > 0) {
- entry->requested_packet--;
- brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
- if (entry->state == BRCMF_FWS_STATE_CLOSE)
- use_credit = 0;
- }
- brcmf_skb_if_flags_set_field(skb, CREDITCHECK, use_credit);
- if (!use_credit) {
- brcmf_dbg(TRACE, "exit: no creditcheck set\n");
- return 0;
- }
-
- if (fifo != BRCMF_FWS_FIFO_AC_BE)
- fws->borrow_defer_timestamp = jiffies +
- BRCMF_FWS_BORROW_DEFER_PERIOD;
-
- if (!(*credit)) {
- /* Try to borrow a credit from other queue */
- if (fifo == BRCMF_FWS_FIFO_AC_BE &&
- brcmf_fws_borrow_credit(fws) == 0)
- return 0;
-
- brcmf_dbg(TRACE, "exit: ac=%d, credits depleted\n", fifo);
- return -ENAVAIL;
- }
- (*credit)--;
- if (!(*credit))
- fws->fifo_credit_map &= ~(1 << fifo);
- brcmf_dbg(TRACE, "exit: ac=%d, credits=%d\n", fifo, *credit);
- return 0;
-}
-
static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *skb)
{
@@ -1769,32 +1687,51 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
struct brcmf_fws_mac_descriptor *entry;
struct brcmf_bus *bus = fws->drvr->bus_if;
int rc;
+ u8 ifidx;
entry = skcb->mac;
if (IS_ERR(entry))
return PTR_ERR(entry);
- rc = brcmf_fws_precommit_skb(fws, fifo, skb);
+ brcmf_fws_precommit_skb(fws, fifo, skb);
+ rc = brcmf_bus_txdata(bus, skb);
+ brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
+ skcb->if_flags, skcb->htod, rc);
if (rc < 0) {
- fws->stats.generic_error++;
+ brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
goto rollback;
}
- rc = brcmf_bus_txdata(bus, skb);
- if (rc < 0)
- goto rollback;
-
- entry->seq[fifo]++;
+ entry->transit_count++;
+ if (entry->suppressed)
+ entry->suppr_transit_count++;
fws->stats.pkt2bus++;
- if (brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
- fws->stats.send_pkts[fifo]++;
- fws->stats.fifo_credits_sent[fifo]++;
- }
+ fws->stats.send_pkts[fifo]++;
+ if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
+ fws->stats.requested_sent[fifo]++;
return rc;
rollback:
- brcmf_fws_rollback_toq(fws, skb);
+ brcmf_fws_rollback_toq(fws, skb, fifo);
+ return rc;
+}
+
+static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
+ int fifo)
+{
+ struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
+ int rc, hslot;
+
+ hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
+ brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
+ brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
+ brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
+ rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
+ if (!rc)
+ skcb->mac->seq[fifo]++;
+ else
+ fws->stats.generic_error++;
return rc;
}
@@ -1807,13 +1744,14 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
ulong flags;
int fifo = BRCMF_FWS_FIFO_BCMC;
bool multicast = is_multicast_ether_addr(eh->h_dest);
+ bool pae = eh->h_proto == htons(ETH_P_PAE);
/* determine the priority */
if (!skb->priority)
skb->priority = cfg80211_classify8021d(skb);
drvr->tx_multicast += !!multicast;
- if (ntohs(eh->h_proto) == ETH_P_PAE)
+ if (pae)
atomic_inc(&ifp->pend_8021x_cnt);
if (!brcmf_fws_fc_active(fws)) {
@@ -1826,29 +1764,30 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
/* set control buffer information */
skcb->if_flags = 0;
- skcb->mac = brcmf_fws_find_mac_desc(fws, ifp, eh->h_dest);
skcb->state = BRCMF_FWS_SKBSTATE_NEW;
brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
if (!multicast)
fifo = brcmf_fws_prio2fifo[skb->priority];
- brcmf_skb_if_flags_set_field(skb, FIFO, fifo);
-
- brcmf_dbg(TRACE, "ea=%pM, multi=%d, fifo=%d\n", eh->h_dest,
- multicast, fifo);
brcmf_fws_lock(drvr, flags);
- if (skcb->mac->suppressed ||
- brcmf_fws_mac_desc_closed(fws, skcb->mac, fifo) ||
- brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) ||
- (!multicast &&
- brcmf_fws_consume_credit(fws, fifo, skb) < 0)) {
- /* enqueue the packet in delayQ */
- drvr->fws->fifo_delay_map |= 1 << fifo;
+ if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
+ fws->borrow_defer_timestamp = jiffies +
+ BRCMF_FWS_BORROW_DEFER_PERIOD;
+
+ skcb->mac = brcmf_fws_macdesc_find(fws, ifp, eh->h_dest);
+ brcmf_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name,
+ eh->h_dest, multicast, fifo);
+ if (!brcmf_fws_assign_htod(fws, skb, fifo)) {
brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
+ brcmf_fws_schedule_deq(fws);
} else {
- if (brcmf_fws_commit_skb(fws, fifo, skb))
- if (!multicast)
- brcmf_skb_pick_up_credit(fws, fifo, skb);
+ brcmf_err("drop skb: no hanger slot\n");
+ if (pae) {
+ atomic_dec(&ifp->pend_8021x_cnt);
+ if (waitqueue_active(&ifp->pend_8021x_wait))
+ wake_up(&ifp->pend_8021x_wait);
+ }
+ brcmu_pkt_buf_free_skb(skb);
}
brcmf_fws_unlock(drvr, flags);
return 0;
@@ -1862,7 +1801,7 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp)
if (!entry)
return;
- brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx);
+ brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
}
void brcmf_fws_add_interface(struct brcmf_if *ifp)
@@ -1870,16 +1809,16 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
struct brcmf_fws_info *fws = ifp->drvr->fws;
struct brcmf_fws_mac_descriptor *entry;
- brcmf_dbg(TRACE, "enter: idx=%d, mac=%pM\n",
- ifp->bssidx, ifp->mac_addr);
if (!ifp->ndev || !ifp->drvr->fw_signals)
return;
entry = &fws->desc.iface[ifp->ifidx];
ifp->fws_desc = entry;
- brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx);
+ brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
+ brcmf_fws_macdesc_set_name(fws, entry);
brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
+ brcmf_dbg(TRACE, "added %s\n", entry->name);
}
void brcmf_fws_del_interface(struct brcmf_if *ifp)
@@ -1887,13 +1826,13 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
ulong flags;
- brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
if (!entry)
return;
brcmf_fws_lock(ifp->drvr, flags);
ifp->fws_desc = NULL;
- brcmf_fws_clear_mac_descriptor(entry);
+ brcmf_dbg(TRACE, "deleting %s\n", entry->name);
+ brcmf_fws_macdesc_deinit(entry);
brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
brcmf_fws_unlock(ifp->drvr, flags);
}
@@ -1904,39 +1843,37 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
struct sk_buff *skb;
ulong flags;
int fifo;
- int credit;
fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
- brcmf_dbg(TRACE, "enter: fws=%p\n", fws);
brcmf_fws_lock(fws->drvr, flags);
- for (fifo = NL80211_NUM_ACS; fifo >= 0; fifo--) {
- brcmf_dbg(TRACE, "fifo %d credit %d\n", fifo,
- fws->fifo_credit[fifo]);
- for (credit = 0; credit < fws->fifo_credit[fifo]; /* nop */) {
+ for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
+ fifo--) {
+ while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
+ (fifo == BRCMF_FWS_FIFO_BCMC))) {
skb = brcmf_fws_deq(fws, fifo);
- if (!skb || brcmf_fws_commit_skb(fws, fifo, skb))
+ if (!skb)
+ break;
+ fws->fifo_credit[fifo]--;
+ if (brcmf_fws_commit_skb(fws, fifo, skb))
+ break;
+ if (fws->bus_flow_blocked)
break;
- if (brcmf_skbcb(skb)->if_flags &
- BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)
- credit++;
}
if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
- (credit == fws->fifo_credit[fifo])) {
- fws->fifo_credit[fifo] -= credit;
+ (fws->fifo_credit[fifo] == 0) &&
+ (!fws->bus_flow_blocked)) {
while (brcmf_fws_borrow_credit(fws) == 0) {
skb = brcmf_fws_deq(fws, fifo);
if (!skb) {
brcmf_fws_return_credits(fws, fifo, 1);
break;
}
- if (brcmf_fws_commit_skb(fws, fifo, skb)) {
- brcmf_fws_return_credits(fws, fifo, 1);
+ if (brcmf_fws_commit_skb(fws, fifo, skb))
+ break;
+ if (fws->bus_flow_blocked)
break;
- }
}
- } else {
- fws->fifo_credit[fifo] -= credit;
}
}
brcmf_fws_unlock(fws->drvr, flags);
@@ -1982,6 +1919,13 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
brcmf_err("register credit map handler failed\n");
goto fail;
}
+ rc = brcmf_fweh_register(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT,
+ brcmf_fws_notify_bcmc_credit_support);
+ if (rc < 0) {
+ brcmf_err("register bcmc credit handler failed\n");
+ brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
+ goto fail;
+ }
/* setting the iovar may fail if feature is unsupported
* so leave the rc as is so driver initialization can
@@ -1993,19 +1937,20 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
}
brcmf_fws_hanger_init(&drvr->fws->hanger);
- brcmf_fws_init_mac_descriptor(&drvr->fws->desc.other, NULL, 0);
+ brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0);
+ brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other);
brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
/* create debugfs file for statistics */
brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
- /* TODO: remove upon feature delivery */
- brcmf_err("%s bdcv2 tlv signaling [%x]\n",
+ brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
drvr->fw_signals ? "enabled" : "disabled", tlv);
return 0;
fail_event:
+ brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT);
brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
fail:
brcmf_fws_deinit(drvr);
@@ -2043,25 +1988,31 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
if (!fws)
return false;
- brcmf_dbg(TRACE, "enter: mode=%d\n", fws->fcmode);
return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
}
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
ulong flags;
+ u32 hslot;
- brcmf_fws_lock(fws->drvr, flags);
- brcmf_fws_txstatus_process(fws, BRCMF_FWS_TXSTATUS_FW_TOSSED,
- brcmf_skb_htod_tag_get_field(skb, HSLOT), 0);
- /* the packet never reached firmware so reclaim credit */
- if (fws->fcmode == BRCMF_FWS_FCMODE_EXPLICIT_CREDIT &&
- brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
- brcmf_fws_return_credits(fws,
- brcmf_skb_htod_tag_get_field(skb,
- FIFO),
- 1);
- brcmf_fws_schedule_deq(fws);
+ if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
+ brcmu_pkt_buf_free_skb(skb);
+ return;
}
+ brcmf_fws_lock(fws->drvr, flags);
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+ brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
brcmf_fws_unlock(fws->drvr, flags);
}
+
+void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
+{
+ struct brcmf_fws_info *fws = drvr->fws;
+
+ fws->bus_flow_blocked = flow_blocked;
+ if (!flow_blocked)
+ brcmf_fws_schedule_deq(fws);
+ else
+ fws->stats.bus_flow_block++;
+}
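
The brcmf_fws_bus_blocked() export above gives a bus driver a way to pause the firmware-signalling dequeue worker while its own transmit path is saturated and to kick it again once it drains; the reworked dequeue loop earlier in this patch checks fws->bus_flow_blocked before committing packets. Below is a minimal sketch of how a bus driver might drive it; the my_bus structure and its watermark fields are illustrative assumptions, only brcmf_fws_bus_blocked() itself comes from this patch.

struct my_bus {
	struct brcmf_pub *drvr;		/* host driver handle */
	int txq_len;			/* packets queued toward hardware */
	int txq_high_watermark;		/* hypothetical threshold */
	bool flow_blocked;		/* last state reported to fwsignal */
};

static void my_bus_txq_update(struct my_bus *bus)
{
	bool blocked = bus->txq_len >= bus->txq_high_watermark;

	/* fwsignal stops handing packets to the bus while blocked is set
	 * and reschedules the dequeue worker as soon as it clears.
	 */
	if (blocked != bus->flow_blocked) {
		bus->flow_blocked = blocked;
		brcmf_fws_bus_blocked(bus->drvr, blocked);
	}
}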
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
index fbe483d23752..9fc860910bd8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
@@ -29,5 +29,6 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp);
void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
+void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 7c1b6332747e..09786a539950 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -170,7 +170,6 @@ struct brcmf_sdio_dev {
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
- wait_queue_head_t request_chain_wait;
wait_queue_head_t request_buffer_wait;
struct device *dev;
struct brcmf_bus *bus_if;
@@ -230,8 +229,6 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
#define SDIO_REQ_4BYTE 0x1
/* Fixed address (FIFO) (vs. incrementing address) */
#define SDIO_REQ_FIXED 0x2
-/* Async request (vs. sync request) */
-#define SDIO_REQ_ASYNC 0x4
/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
* rw: read or write (0/1)
@@ -252,9 +249,6 @@ extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev,
- u32 address);
-
/* attach, return handler on success, NULL if failed.
* The handler shall be provided by all subsequent calls. No local cache
* cfghdl points to the starting address of pci device mapped memory
@@ -272,16 +266,6 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
uint rw, uint fnc, uint addr,
u32 *word, uint nbyte);
-/* read or write any buffer using cmd53 */
-extern int
-brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
- uint fix_inc, uint rw, uint fnc_num, u32 addr,
- struct sk_buff *pkt);
-extern int
-brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
- uint write, uint func, uint addr,
- struct sk_buff_head *pktq);
-
/* Watchdog timer interface for pm ops */
extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
bool enable);
@@ -291,4 +275,8 @@ extern void brcmf_sdbrcm_disconnect(void *ptr);
extern void brcmf_sdbrcm_isr(void *arg);
extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+
+extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+ wait_queue_head_t *wq);
+extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index 9df1f7a681e0..bc2917112899 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -87,6 +87,27 @@ TRACE_EVENT(brcmf_hexdump,
TP_printk("hexdump [length=%lu]", __entry->len)
);
+TRACE_EVENT(brcmf_bdchdr,
+ TP_PROTO(void *data),
+ TP_ARGS(data),
+ TP_STRUCT__entry(
+ __field(u8, flags)
+ __field(u8, prio)
+ __field(u8, flags2)
+ __field(u32, siglen)
+ __dynamic_array(u8, signal, *((u8 *)data + 3) * 4)
+ ),
+ TP_fast_assign(
+ __entry->flags = *(u8 *)data;
+ __entry->prio = *((u8 *)data + 1);
+ __entry->flags2 = *((u8 *)data + 2);
+ __entry->siglen = *((u8 *)data + 3) * 4;
+ memcpy(__get_dynamic_array(signal),
+ (u8 *)data + 4, __entry->siglen);
+ ),
+ TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
+);
+
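The TP_fast_assign() block above also documents the BDC header layout this tracepoint captures: flags, priority and flags2 occupy the first three bytes, byte 3 holds the length of the trailing signalling data in 32-bit words, and the signalling TLVs follow immediately after. A small parser sketch for the same layout, purely illustrative (the struct and function names are not part of this patch; the offsets are taken from the tracepoint):

struct bdc_hdr_view {
	u8 flags;
	u8 prio;
	u8 flags2;
	u32 siglen;		/* signalling data length in bytes */
	const u8 *signal;	/* start of the signalling TLVs */
};

static void bdc_hdr_parse(const u8 *data, struct bdc_hdr_view *hdr)
{
	hdr->flags = data[0];
	hdr->prio = data[1];
	hdr->flags2 = data[2];
	hdr->siglen = data[3] * 4;	/* byte 3 counts 32-bit words */
	hdr->signal = data + 4;
}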
#ifdef CONFIG_BRCM_TRACING
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 01aed7ad6bec..322cadc51ded 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -82,6 +82,7 @@ struct brcmf_usbdev_info {
int tx_high_watermark;
int tx_freecount;
bool tx_flowblock;
+ spinlock_t tx_flowblock_lock;
struct brcmf_usbreq *tx_reqs;
struct brcmf_usbreq *rx_reqs;
@@ -411,6 +412,7 @@ static void brcmf_usb_tx_complete(struct urb *urb)
{
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
+ unsigned long flags;
brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
req->skb);
@@ -419,11 +421,13 @@ static void brcmf_usb_tx_complete(struct urb *urb)
brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
req->skb = NULL;
brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
+ spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
devinfo->tx_flowblock) {
brcmf_txflowblock(devinfo->dev, false);
devinfo->tx_flowblock = false;
}
+ spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
}
static void brcmf_usb_rx_complete(struct urb *urb)
@@ -568,6 +572,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
struct brcmf_usbreq *req;
int ret;
+ unsigned long flags;
brcmf_dbg(USB, "Enter, skb=%p\n", skb);
if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
@@ -599,11 +604,13 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
goto fail;
}
+ spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
!devinfo->tx_flowblock) {
brcmf_txflowblock(dev, true);
devinfo->tx_flowblock = true;
}
+ spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
return 0;
fail:
@@ -1164,6 +1171,7 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
/* Initialize the spinlocks */
spin_lock_init(&devinfo->qlock);
+ spin_lock_init(&devinfo->tx_flowblock_lock);
INIT_LIST_HEAD(&devinfo->rx_freeq);
INIT_LIST_HEAD(&devinfo->rx_postq);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 301e572e8923..7fa71f73cfe8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1093,8 +1093,11 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n ");
err = brcmf_fil_cmd_data_set(vif->ifp,
BRCMF_C_DISASSOC, NULL, 0);
- if (err)
+ if (err) {
brcmf_err("WLC_DISASSOC failed (%d)\n", err);
+ cfg80211_disconnected(vif->wdev.netdev, 0,
+ NULL, 0, GFP_KERNEL);
+ }
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
@@ -3982,6 +3985,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct brcmf_fil_af_params_le *af_params;
bool ack;
s32 chan_nr;
+ u32 freq;
brcmf_dbg(TRACE, "Enter\n");
@@ -3994,6 +3998,8 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
return -EPERM;
}
+ vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
/* Right now the only reason to get a probe response */
/* is for p2p listen response or for p2p GO from */
@@ -4009,7 +4015,6 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
ie_offset = DOT11_MGMT_HDR_LEN +
DOT11_BCN_PRB_FIXED_LEN;
ie_len = len - ie_offset;
- vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
err = brcmf_vif_set_mgmt_ie(vif,
@@ -4033,16 +4038,22 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
/* Add the length, excluding the 802.11 header */
action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
- /* Add the channel */
- chan_nr = ieee80211_frequency_to_channel(chan->center_freq);
+ /* Add the channel. Use the one specified as a parameter, if any,
+ * or else the current one obtained from the firmware.
+ */
+ if (chan)
+ freq = chan->center_freq;
+ else
+ brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL,
+ &freq);
+ chan_nr = ieee80211_frequency_to_channel(freq);
af_params->channel = cpu_to_le32(chan_nr);
memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
le16_to_cpu(action_frame->len));
brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
- *cookie, le16_to_cpu(action_frame->len),
- chan->center_freq);
+ *cookie, le16_to_cpu(action_frame->len), freq);
ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
af_params);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 1585cc5bf866..bd982856d385 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -900,7 +900,7 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
if (supr_status) {
update_rate = false;
if (supr_status == TX_STATUS_SUPR_BADCH) {
- brcms_err(wlc->hw->d11core,
+ brcms_dbg_ht(wlc->hw->d11core,
"%s: Pkt tx suppressed, illegal channel possibly %d\n",
__func__, CHSPEC_CHANNEL(
wlc->default_bss->chanspec));
diff --git a/drivers/net/wireless/cw1200/Kconfig b/drivers/net/wireless/cw1200/Kconfig
new file mode 100644
index 000000000000..0880742eab17
--- /dev/null
+++ b/drivers/net/wireless/cw1200/Kconfig
@@ -0,0 +1,30 @@
+config CW1200
+ tristate "CW1200 WLAN support"
+ depends on MAC80211 && CFG80211
+ help
+ This is a driver for the ST-E CW1100 & CW1200 WLAN chipsets.
+ This option just enables the driver core, see below for
+ specific bus support.
+
+if CW1200
+
+config CW1200_WLAN_SDIO
+ tristate "Support SDIO platforms"
+ depends on CW1200 && MMC
+ help
+ Enable support for the CW1200 connected via an SDIO bus.
+ By default this driver only supports the Sagrad SG901-1091/1098 EVK
+ and similar designs that utilize a hardware reset circuit. To
+ support different CW1200 SDIO designs you will need to override
+ the default platform data by calling cw1200_sdio_set_platform_data()
+ in your board setup file.
+
+config CW1200_WLAN_SPI
+ tristate "Support SPI platforms"
+ depends on CW1200 && SPI
+ help
+ Enables support for the CW1200 connected via an SPI bus. You will
+ need to add appropriate platform data glue in your board setup
+ file.
+
+endif
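
The SDIO help text above points boards that do not match the Sagrad EVK reset circuit at cw1200_sdio_set_platform_data(). A board-setup sketch is shown below; the platform-data struct, its field names and the initcall wiring are assumptions made for illustration, only the setter function is named by this Kconfig entry.

/* Hypothetical board glue; field names are illustrative. */
static struct cw1200_platform_data_sdio my_cw1200_pdata = {
	.ref_clk = 38400,	/* reference clock in kHz, board specific */
	.have_5ghz = false,
};

static int __init my_board_wlan_init(void)
{
	/* must run before the cw1200_wlan_sdio driver probes */
	cw1200_sdio_set_platform_data(&my_cw1200_pdata);
	return 0;
}
arch_initcall(my_board_wlan_init);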
diff --git a/drivers/net/wireless/cw1200/Makefile b/drivers/net/wireless/cw1200/Makefile
new file mode 100644
index 000000000000..b086aac6547a
--- /dev/null
+++ b/drivers/net/wireless/cw1200/Makefile
@@ -0,0 +1,21 @@
+cw1200_core-y := \
+ fwio.o \
+ txrx.o \
+ main.o \
+ queue.o \
+ hwio.o \
+ bh.o \
+ wsm.o \
+ sta.o \
+ scan.o \
+ debug.o
+cw1200_core-$(CONFIG_PM) += pm.o
+
+# CFLAGS_sta.o += -DDEBUG
+
+cw1200_wlan_sdio-y := cw1200_sdio.o
+cw1200_wlan_spi-y := cw1200_spi.o
+
+obj-$(CONFIG_CW1200) += cw1200_core.o
+obj-$(CONFIG_CW1200_WLAN_SDIO) += cw1200_wlan_sdio.o
+obj-$(CONFIG_CW1200_WLAN_SPI) += cw1200_wlan_spi.o
diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c
new file mode 100644
index 000000000000..c1ec2a4dd8c0
--- /dev/null
+++ b/drivers/net/wireless/cw1200/bh.c
@@ -0,0 +1,619 @@
+/*
+ * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver, which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <net/mac80211.h>
+#include <linux/kthread.h>
+#include <linux/timer.h>
+
+#include "cw1200.h"
+#include "bh.h"
+#include "hwio.h"
+#include "wsm.h"
+#include "hwbus.h"
+#include "debug.h"
+#include "fwio.h"
+
+static int cw1200_bh(void *arg);
+
+#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4)
+/* An SPI message cannot be bigger than (2^12 - 1) * 2 bytes
+ * ("*2" converts 16-bit words to bytes).
+ */
+#define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2)
+#define PIGGYBACK_CTRL_REG (2)
+#define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)
+
+/* Suspend state privates */
+enum cw1200_bh_pm_state {
+ CW1200_BH_RESUMED = 0,
+ CW1200_BH_SUSPEND,
+ CW1200_BH_SUSPENDED,
+ CW1200_BH_RESUME,
+};
+
+typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
+ u8 *data, size_t size);
+
+static void cw1200_bh_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, bh_work);
+ cw1200_bh(priv);
+}
+
+int cw1200_register_bh(struct cw1200_common *priv)
+{
+ int err = 0;
+ /* Realtime workqueue */
+ priv->bh_workqueue = alloc_workqueue("cw1200_bh",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI
+ | WQ_CPU_INTENSIVE, 1);
+
+ if (!priv->bh_workqueue)
+ return -ENOMEM;
+
+ INIT_WORK(&priv->bh_work, cw1200_bh_work);
+
+ pr_debug("[BH] register.\n");
+
+ atomic_set(&priv->bh_rx, 0);
+ atomic_set(&priv->bh_tx, 0);
+ atomic_set(&priv->bh_term, 0);
+ atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
+ priv->bh_error = 0;
+ priv->hw_bufs_used = 0;
+ priv->buf_id_tx = 0;
+ priv->buf_id_rx = 0;
+ init_waitqueue_head(&priv->bh_wq);
+ init_waitqueue_head(&priv->bh_evt_wq);
+
+ err = !queue_work(priv->bh_workqueue, &priv->bh_work);
+ WARN_ON(err);
+ return err;
+}
+
+void cw1200_unregister_bh(struct cw1200_common *priv)
+{
+ atomic_add(1, &priv->bh_term);
+ wake_up(&priv->bh_wq);
+
+ flush_workqueue(priv->bh_workqueue);
+
+ destroy_workqueue(priv->bh_workqueue);
+ priv->bh_workqueue = NULL;
+
+ pr_debug("[BH] unregistered.\n");
+}
+
+void cw1200_irq_handler(struct cw1200_common *priv)
+{
+ pr_debug("[BH] irq.\n");
+
+ /* Disable Interrupts! */
+ /* NOTE: hwbus_ops->lock already held */
+ __cw1200_irq_enable(priv, 0);
+
+ if (/* WARN_ON */(priv->bh_error))
+ return;
+
+ if (atomic_add_return(1, &priv->bh_rx) == 1)
+ wake_up(&priv->bh_wq);
+}
+EXPORT_SYMBOL_GPL(cw1200_irq_handler);
+
+void cw1200_bh_wakeup(struct cw1200_common *priv)
+{
+ pr_debug("[BH] wakeup.\n");
+ if (priv->bh_error) {
+ pr_err("[BH] wakeup failed (BH error)\n");
+ return;
+ }
+
+ if (atomic_add_return(1, &priv->bh_tx) == 1)
+ wake_up(&priv->bh_wq);
+}
+
+int cw1200_bh_suspend(struct cw1200_common *priv)
+{
+ pr_debug("[BH] suspend.\n");
+ if (priv->bh_error) {
+ wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
+ return -EINVAL;
+ }
+
+ atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
+ wake_up(&priv->bh_wq);
+ return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
+ (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
+ 1 * HZ) ? 0 : -ETIMEDOUT;
+}
+
+int cw1200_bh_resume(struct cw1200_common *priv)
+{
+ pr_debug("[BH] resume.\n");
+ if (priv->bh_error) {
+ wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
+ return -EINVAL;
+ }
+
+ atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
+ wake_up(&priv->bh_wq);
+ return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
+ (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
+ 1 * HZ) ? 0 : -ETIMEDOUT;
+}
+
+static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
+{
+ ++priv->hw_bufs_used;
+}
+
+int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
+{
+ int ret = 0;
+ int hw_bufs_used = priv->hw_bufs_used;
+
+ priv->hw_bufs_used -= count;
+ if (WARN_ON(priv->hw_bufs_used < 0))
+ ret = -1;
+ else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
+ ret = 1;
+ if (!priv->hw_bufs_used)
+ wake_up(&priv->bh_evt_wq);
+ return ret;
+}
+
+static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
+ u16 *ctrl_reg)
+{
+ int ret;
+
+ ret = cw1200_reg_read_16(priv,
+ ST90TDS_CONTROL_REG_ID, ctrl_reg);
+ if (ret) {
+ ret = cw1200_reg_read_16(priv,
+ ST90TDS_CONTROL_REG_ID, ctrl_reg);
+ if (ret)
+ pr_err("[BH] Failed to read control register.\n");
+ }
+
+ return ret;
+}
+
+static int cw1200_device_wakeup(struct cw1200_common *priv)
+{
+ u16 ctrl_reg;
+ int ret;
+
+ pr_debug("[BH] Device wakeup.\n");
+
+ /* First, set the dpll register */
+ ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
+ cw1200_dpll_from_clk(priv->hw_refclk));
+ if (WARN_ON(ret))
+ return ret;
+
+ /* To force the device to be always-on, the host sets WLAN_UP to 1 */
+ ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
+ ST90TDS_CONT_WUP_BIT);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
+ if (WARN_ON(ret))
+ return ret;
+
+ /* If the device returns WLAN_RDY as 1, the device is active and will
+ * remain active.
+ */
+ if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
+ pr_debug("[BH] Device awake.\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Must be called from BH thread. */
+void cw1200_enable_powersave(struct cw1200_common *priv,
+ bool enable)
+{
+ pr_debug("[BH] Powerave is %s.\n",
+ enable ? "enabled" : "disabled");
+ priv->powersave_enabled = enable;
+}
+
+static int cw1200_bh_rx_helper(struct cw1200_common *priv,
+ uint16_t *ctrl_reg,
+ int *tx)
+{
+ size_t read_len = 0;
+ struct sk_buff *skb_rx = NULL;
+ struct wsm_hdr *wsm;
+ size_t wsm_len;
+ u16 wsm_id;
+ u8 wsm_seq;
+ int rx_resync = 1;
+
+ size_t alloc_len;
+ u8 *data;
+
+ read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
+ if (!read_len)
+ return 0; /* No more work */
+
+ if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
+ (read_len > EFFECTIVE_BUF_SIZE))) {
+ pr_debug("Invalid read len: %zu (%04x)",
+ read_len, *ctrl_reg);
+ goto err;
+ }
+
+ /* Add SIZE of PIGGYBACK reg (CONTROL Reg)
+ * to the NEXT Message length + 2 Bytes for SKB
+ */
+ read_len = read_len + 2;
+
+ alloc_len = priv->hwbus_ops->align_size(
+ priv->hwbus_priv, read_len);
+
+ /* Check if not exceeding CW1200 capabilities */
+ if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
+ pr_debug("Read aligned len: %zu\n",
+ alloc_len);
+ }
+
+ skb_rx = dev_alloc_skb(alloc_len);
+ if (WARN_ON(!skb_rx))
+ goto err;
+
+ skb_trim(skb_rx, 0);
+ skb_put(skb_rx, read_len);
+ data = skb_rx->data;
+ if (WARN_ON(!data))
+ goto err;
+
+ if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
+ pr_err("rx blew up, len %zu\n", alloc_len);
+ goto err;
+ }
+
+ /* Piggyback */
+ *ctrl_reg = __le16_to_cpu(
+ ((__le16 *)data)[alloc_len / 2 - 1]);
+
+ wsm = (struct wsm_hdr *)data;
+ wsm_len = __le16_to_cpu(wsm->len);
+ if (WARN_ON(wsm_len > read_len))
+ goto err;
+
+ if (priv->wsm_enable_wsm_dumps)
+ print_hex_dump_bytes("<-- ",
+ DUMP_PREFIX_NONE,
+ data, wsm_len);
+
+ wsm_id = __le16_to_cpu(wsm->id) & 0xFFF;
+ wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;
+
+ skb_trim(skb_rx, wsm_len);
+
+ if (wsm_id == 0x0800) {
+ wsm_handle_exception(priv,
+ &data[sizeof(*wsm)],
+ wsm_len - sizeof(*wsm));
+ goto err;
+ } else if (!rx_resync) {
+ if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
+ goto err;
+ }
+ priv->wsm_rx_seq = (wsm_seq + 1) & 7;
+ rx_resync = 0;
+
+ if (wsm_id & 0x0400) {
+ int rc = wsm_release_tx_buffer(priv, 1);
+ if (WARN_ON(rc < 0))
+ return rc;
+ else if (rc > 0)
+ *tx = 1;
+ }
+
+ /* cw1200_wsm_rx takes care of the SKB lifetime */
+ if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
+ goto err;
+
+ if (skb_rx) {
+ dev_kfree_skb(skb_rx);
+ skb_rx = NULL;
+ }
+
+ return 0;
+
+err:
+ if (skb_rx) {
+ dev_kfree_skb(skb_rx);
+ skb_rx = NULL;
+ }
+ return -1;
+}
+
+static int cw1200_bh_tx_helper(struct cw1200_common *priv,
+ int *pending_tx,
+ int *tx_burst)
+{
+ size_t tx_len;
+ u8 *data;
+ int ret;
+ struct wsm_hdr *wsm;
+
+ if (priv->device_can_sleep) {
+ ret = cw1200_device_wakeup(priv);
+ if (WARN_ON(ret < 0)) { /* Error in wakeup */
+ *pending_tx = 1;
+ return 0;
+ } else if (ret) { /* Woke up */
+ priv->device_can_sleep = false;
+ } else { /* Did not awake */
+ *pending_tx = 1;
+ return 0;
+ }
+ }
+
+ wsm_alloc_tx_buffer(priv);
+ ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
+ if (ret <= 0) {
+ wsm_release_tx_buffer(priv, 1);
+ if (WARN_ON(ret < 0))
+ return ret; /* Error */
+ return 0; /* No work */
+ }
+
+ wsm = (struct wsm_hdr *)data;
+ BUG_ON(tx_len < sizeof(*wsm));
+ BUG_ON(__le16_to_cpu(wsm->len) != tx_len);
+
+ atomic_add(1, &priv->bh_tx);
+
+ tx_len = priv->hwbus_ops->align_size(
+ priv->hwbus_priv, tx_len);
+
+ /* Check if not exceeding CW1200 capabilities */
+ if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
+ pr_debug("Write aligned len: %zu\n", tx_len);
+
+ wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
+ wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));
+
+ if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
+ pr_err("tx blew up, len %zu\n", tx_len);
+ wsm_release_tx_buffer(priv, 1);
+ return -1; /* Error */
+ }
+
+ if (priv->wsm_enable_wsm_dumps)
+ print_hex_dump_bytes("--> ",
+ DUMP_PREFIX_NONE,
+ data,
+ __le16_to_cpu(wsm->len));
+
+ wsm_txed(priv, data);
+ priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;
+
+ if (*tx_burst > 1) {
+ cw1200_debug_tx_burst(priv);
+ return 1; /* Work remains */
+ }
+
+ return 0;
+}
+
+static int cw1200_bh(void *arg)
+{
+ struct cw1200_common *priv = arg;
+ int rx, tx, term, suspend;
+ u16 ctrl_reg = 0;
+ int tx_allowed;
+ int pending_tx = 0;
+ int tx_burst;
+ long status;
+ u32 dummy;
+ int ret;
+
+ for (;;) {
+ if (!priv->hw_bufs_used &&
+ priv->powersave_enabled &&
+ !priv->device_can_sleep &&
+ !atomic_read(&priv->recent_scan)) {
+ status = 1 * HZ;
+ pr_debug("[BH] Device wakedown. No data.\n");
+ cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
+ priv->device_can_sleep = true;
+ } else if (priv->hw_bufs_used) {
+ /* Interrupt loss detection */
+ status = 1 * HZ;
+ } else {
+ status = MAX_SCHEDULE_TIMEOUT;
+ }
+
+ /* Dummy Read for SDIO retry mechanism*/
+ if ((priv->hw_type != -1) &&
+ (atomic_read(&priv->bh_rx) == 0) &&
+ (atomic_read(&priv->bh_tx) == 0))
+ cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
+ &dummy, sizeof(dummy));
+
+ pr_debug("[BH] waiting ...\n");
+ status = wait_event_interruptible_timeout(priv->bh_wq, ({
+ rx = atomic_xchg(&priv->bh_rx, 0);
+ tx = atomic_xchg(&priv->bh_tx, 0);
+ term = atomic_xchg(&priv->bh_term, 0);
+ suspend = pending_tx ?
+ 0 : atomic_read(&priv->bh_suspend);
+ (rx || tx || term || suspend || priv->bh_error);
+ }), status);
+
+ pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n",
+ rx, tx, term, suspend, status);
+
+ /* Did an error occur? */
+ if ((status < 0 && status != -ERESTARTSYS) ||
+ term || priv->bh_error) {
+ break;
+ }
+ if (!status) { /* wait_event timed out */
+ unsigned long timestamp = jiffies;
+ long timeout;
+ int pending = 0;
+ int i;
+
+ /* Check to see if we have any outstanding frames */
+ if (priv->hw_bufs_used && (!rx || !tx)) {
+ wiphy_warn(priv->hw->wiphy,
+ "Missed interrupt? (%d frames outstanding)\n",
+ priv->hw_bufs_used);
+ rx = 1;
+
+ /* Get a timestamp of "oldest" frame */
+ for (i = 0; i < 4; ++i)
+ pending += cw1200_queue_get_xmit_timestamp(
+ &priv->tx_queue[i],
+ &timestamp,
+ priv->pending_frame_id);
+
+ /* Check if frame transmission is timed out.
+ * Add an extra second with respect to possible
+ * interrupt loss.
+ */
+ timeout = timestamp +
+ WSM_CMD_LAST_CHANCE_TIMEOUT +
+ 1 * HZ -
+ jiffies;
+
+ /* And terminate BH thread if the frame is "stuck" */
+ if (pending && timeout < 0) {
+ wiphy_warn(priv->hw->wiphy,
+ "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
+ priv->hw_bufs_used, pending,
+ timestamp, jiffies);
+ break;
+ }
+ } else if (!priv->device_can_sleep &&
+ !atomic_read(&priv->recent_scan)) {
+ pr_debug("[BH] Device wakedown. Timeout.\n");
+ cw1200_reg_write_16(priv,
+ ST90TDS_CONTROL_REG_ID, 0);
+ priv->device_can_sleep = true;
+ }
+ goto done;
+ } else if (suspend) {
+ pr_debug("[BH] Device suspend.\n");
+ if (priv->powersave_enabled) {
+ pr_debug("[BH] Device wakedown. Suspend.\n");
+ cw1200_reg_write_16(priv,
+ ST90TDS_CONTROL_REG_ID, 0);
+ priv->device_can_sleep = true;
+ }
+
+ atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
+ wake_up(&priv->bh_evt_wq);
+ status = wait_event_interruptible(priv->bh_wq,
+ CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
+ if (status < 0) {
+ wiphy_err(priv->hw->wiphy,
+ "Failed to wait for resume: %ld.\n",
+ status);
+ break;
+ }
+ pr_debug("[BH] Device resume.\n");
+ atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
+ wake_up(&priv->bh_evt_wq);
+ atomic_add(1, &priv->bh_rx);
+ goto done;
+ }
+
+ rx:
+ tx += pending_tx;
+ pending_tx = 0;
+
+ if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
+ break;
+
+ /* Don't bother trying to rx unless we have data to read */
+ if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
+ ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
+ if (ret < 0)
+ break;
+ /* Double up here if there's more data.. */
+ if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
+ ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
+ if (ret < 0)
+ break;
+ }
+ }
+
+ tx:
+ if (tx) {
+ tx = 0;
+
+ BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
+ tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
+ tx_allowed = tx_burst > 0;
+
+ if (!tx_allowed) {
+ /* Buffers full. Ensure we process tx
+ * after we handle rx..
+ */
+ pending_tx = tx;
+ goto done_rx;
+ }
+ ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
+ if (ret < 0)
+ break;
+ if (ret > 0) /* More to transmit */
+ tx = ret;
+
+ /* Re-read ctrl reg */
+ if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
+ break;
+ }
+
+ done_rx:
+ if (priv->bh_error)
+ break;
+ if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
+ goto rx;
+ if (tx)
+ goto tx;
+
+ done:
+ /* Re-enable device interrupts */
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+ __cw1200_irq_enable(priv, 1);
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+ }
+
+ /* Explicitly disable device interrupts */
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+ __cw1200_irq_enable(priv, 0);
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+
+ if (!term) {
+ pr_err("[BH] Fatal error, exiting.\n");
+ priv->bh_error = 1;
+ /* TODO: schedule_work(recovery) */
+ }
+ return 0;
+}
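
cw1200_irq_handler() is exported for the bus glue and, per the note inside it, expects the hwbus lock to already be held when it masks the device interrupt and wakes the BH. A sketch of how a bus-glue interrupt routine might use it is shown here; the glue structure and its mutex stand in for a real hwbus implementation (SDIO interrupt callbacks run in sleepable context, so a mutex-style lock is assumed), and only cw1200_irq_handler() and the lock-held requirement come from the code above.

struct my_glue {
	struct cw1200_common *core;	/* handle passed back to the core */
	struct mutex lock;		/* stands in for the hwbus lock */
};

static void my_glue_threaded_isr(struct my_glue *glue)
{
	mutex_lock(&glue->lock);	/* handler runs with the bus locked */
	cw1200_irq_handler(glue->core);
	mutex_unlock(&glue->lock);
}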
diff --git a/drivers/net/wireless/cw1200/bh.h b/drivers/net/wireless/cw1200/bh.h
new file mode 100644
index 000000000000..af6a4853728f
--- /dev/null
+++ b/drivers/net/wireless/cw1200/bh.h
@@ -0,0 +1,28 @@
+/*
+ * Device handling thread interface for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_BH_H
+#define CW1200_BH_H
+
+/* extern */ struct cw1200_common;
+
+int cw1200_register_bh(struct cw1200_common *priv);
+void cw1200_unregister_bh(struct cw1200_common *priv);
+void cw1200_irq_handler(struct cw1200_common *priv);
+void cw1200_bh_wakeup(struct cw1200_common *priv);
+int cw1200_bh_suspend(struct cw1200_common *priv);
+int cw1200_bh_resume(struct cw1200_common *priv);
+/* Must be called from BH thread. */
+void cw1200_enable_powersave(struct cw1200_common *priv,
+ bool enable);
+int wsm_release_tx_buffer(struct cw1200_common *priv, int count);
+
+#endif /* CW1200_BH_H */
diff --git a/drivers/net/wireless/cw1200/cw1200.h b/drivers/net/wireless/cw1200/cw1200.h
new file mode 100644
index 000000000000..1ad7d3602520
--- /dev/null
+++ b/drivers/net/wireless/cw1200/cw1200.h
@@ -0,0 +1,323 @@
+/*
+ * Common private data for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on the mac80211 Prism54 code, which is
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ *
+ * Based on the islsm (softmac prism54) driver, which is:
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_H
+#define CW1200_H
+
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <net/mac80211.h>
+
+#include "queue.h"
+#include "wsm.h"
+#include "scan.h"
+#include "txrx.h"
+#include "pm.h"
+
+/* Forward declarations */
+struct hwbus_ops;
+struct task_struct;
+struct cw1200_debug_priv;
+struct firmware;
+
+#define CW1200_MAX_CTRL_FRAME_LEN (0x1000)
+
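+/* Link-ID space layout (as the names below suggest): IDs
+ * 1..CW1200_MAX_STA_IN_AP_MODE address associated stations in AP mode,
+ * with two extra pseudo link IDs reserved for after-DTIM multicast
+ * delivery and U-APSD buffering.
+ */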
+#define CW1200_MAX_STA_IN_AP_MODE (5)
+#define CW1200_LINK_ID_AFTER_DTIM (CW1200_MAX_STA_IN_AP_MODE + 1)
+#define CW1200_LINK_ID_UAPSD (CW1200_MAX_STA_IN_AP_MODE + 2)
+#define CW1200_LINK_ID_MAX (CW1200_MAX_STA_IN_AP_MODE + 3)
+#define CW1200_MAX_REQUEUE_ATTEMPTS (5)
+
+#define CW1200_MAX_TID (8)
+
+#define CW1200_BLOCK_ACK_CNT (30)
+#define CW1200_BLOCK_ACK_THLD (800)
+#define CW1200_BLOCK_ACK_HIST (3)
+#define CW1200_BLOCK_ACK_INTERVAL (1 * HZ / CW1200_BLOCK_ACK_HIST)
+
+#define CW1200_JOIN_TIMEOUT (1 * HZ)
+#define CW1200_AUTH_TIMEOUT (5 * HZ)
+
+struct cw1200_ht_info {
+ struct ieee80211_sta_ht_cap ht_cap;
+ enum nl80211_channel_type channel_type;
+ u16 operation_mode;
+};
+
+/* Please keep order */
+enum cw1200_join_status {
+ CW1200_JOIN_STATUS_PASSIVE = 0,
+ CW1200_JOIN_STATUS_MONITOR,
+ CW1200_JOIN_STATUS_JOINING,
+ CW1200_JOIN_STATUS_PRE_STA,
+ CW1200_JOIN_STATUS_STA,
+ CW1200_JOIN_STATUS_IBSS,
+ CW1200_JOIN_STATUS_AP,
+};
+
+enum cw1200_link_status {
+ CW1200_LINK_OFF,
+ CW1200_LINK_RESERVE,
+ CW1200_LINK_SOFT,
+ CW1200_LINK_HARD,
+ CW1200_LINK_RESET,
+ CW1200_LINK_RESET_REMAP,
+};
+
+extern int cw1200_power_mode;
+extern const char * const cw1200_fw_types[];
+
+struct cw1200_link_entry {
+ unsigned long timestamp;
+ enum cw1200_link_status status;
+ enum cw1200_link_status prev_status;
+ u8 mac[ETH_ALEN];
+ u8 buffered[CW1200_MAX_TID];
+ struct sk_buff_head rx_queue;
+};
+
+struct cw1200_common {
+ /* interfaces to the rest of the stack */
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ struct device *pdev;
+
+ /* Statistics */
+ struct ieee80211_low_level_stats stats;
+
+ /* Our macaddr */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Hardware interface */
+ const struct hwbus_ops *hwbus_ops;
+ struct hwbus_priv *hwbus_priv;
+
+ /* Hardware information */
+ enum {
+ HIF_9000_SILICON_VERSATILE = 0,
+ HIF_8601_VERSATILE,
+ HIF_8601_SILICON,
+ } hw_type;
+ enum {
+ CW1200_HW_REV_CUT10 = 10,
+ CW1200_HW_REV_CUT11 = 11,
+ CW1200_HW_REV_CUT20 = 20,
+ CW1200_HW_REV_CUT22 = 22,
+ CW1X60_HW_REV = 40,
+ } hw_revision;
+ int hw_refclk;
+ bool hw_have_5ghz;
+ const struct firmware *sdd;
+ char *sdd_path;
+
+ struct cw1200_debug_priv *debug;
+
+ struct workqueue_struct *workqueue;
+ struct mutex conf_mutex;
+
+ struct cw1200_queue tx_queue[4];
+ struct cw1200_queue_stats tx_queue_stats;
+ int tx_burst_idx;
+
+ /* firmware/hardware info */
+ unsigned int tx_hdr_len;
+
+ /* Radio data */
+ int output_power;
+
+ /* BBP/MAC state */
+ struct ieee80211_rate *rates;
+ struct ieee80211_rate *mcs_rates;
+ struct ieee80211_channel *channel;
+ struct wsm_edca_params edca;
+ struct wsm_tx_queue_params tx_queue_params;
+ struct wsm_mib_association_mode association_mode;
+ struct wsm_set_bss_params bss_params;
+ struct cw1200_ht_info ht_info;
+ struct wsm_set_pm powersave_mode;
+ struct wsm_set_pm firmware_ps_mode;
+ int cqm_rssi_thold;
+ unsigned cqm_rssi_hyst;
+ bool cqm_use_rssi;
+ int cqm_beacon_loss_count;
+ int channel_switch_in_progress;
+ wait_queue_head_t channel_switch_done;
+ u8 long_frame_max_tx_count;
+ u8 short_frame_max_tx_count;
+ int mode;
+ bool enable_beacon;
+ int beacon_int;
+ bool listening;
+ struct wsm_rx_filter rx_filter;
+ struct wsm_mib_multicast_filter multicast_filter;
+ bool has_multicast_subscription;
+ bool disable_beacon_filter;
+ struct work_struct update_filtering_work;
+ struct work_struct set_beacon_wakeup_period_work;
+
+ u8 ba_rx_tid_mask;
+ u8 ba_tx_tid_mask;
+
+ struct cw1200_pm_state pm_state;
+
+ struct wsm_p2p_ps_modeinfo p2p_ps_modeinfo;
+ struct wsm_uapsd_info uapsd_info;
+ bool setbssparams_done;
+ bool bt_present;
+ u8 conf_listen_interval;
+ u32 listen_interval;
+ u32 erp_info;
+ u32 rts_threshold;
+
+ /* BH */
+ atomic_t bh_rx;
+ atomic_t bh_tx;
+ atomic_t bh_term;
+ atomic_t bh_suspend;
+
+ struct workqueue_struct *bh_workqueue;
+ struct work_struct bh_work;
+
+ int bh_error;
+ wait_queue_head_t bh_wq;
+ wait_queue_head_t bh_evt_wq;
+ u8 buf_id_tx;
+ u8 buf_id_rx;
+ u8 wsm_rx_seq;
+ u8 wsm_tx_seq;
+ int hw_bufs_used;
+ bool powersave_enabled;
+ bool device_can_sleep;
+
+ /* Scan status */
+ struct cw1200_scan scan;
+ /* Keep cw1200 awake (WUP = 1) 1 second after each scan to avoid
+ * FW issue with sleeping/waking up.
+ */
+ atomic_t recent_scan;
+ struct delayed_work clear_recent_scan_work;
+
+ /* WSM */
+ struct wsm_startup_ind wsm_caps;
+ struct mutex wsm_cmd_mux;
+ struct wsm_buf wsm_cmd_buf;
+ struct wsm_cmd wsm_cmd;
+ wait_queue_head_t wsm_cmd_wq;
+ wait_queue_head_t wsm_startup_done;
+ int firmware_ready;
+ atomic_t tx_lock;
+
+ /* WSM debug */
+ int wsm_enable_wsm_dumps;
+
+ /* WSM Join */
+ enum cw1200_join_status join_status;
+ u32 pending_frame_id;
+ bool join_pending;
+ struct delayed_work join_timeout;
+ struct work_struct unjoin_work;
+ struct work_struct join_complete_work;
+ int join_complete_status;
+ int join_dtim_period;
+ bool delayed_unjoin;
+
+ /* TX/RX and security */
+ s8 wep_default_key_id;
+ struct work_struct wep_key_work;
+ u32 key_map;
+ struct wsm_add_key keys[WSM_KEY_MAX_INDEX + 1];
+
+ /* AP powersave */
+ u32 link_id_map;
+ struct cw1200_link_entry link_id_db[CW1200_MAX_STA_IN_AP_MODE];
+ struct work_struct link_id_work;
+ struct delayed_work link_id_gc_work;
+ u32 sta_asleep_mask;
+ u32 pspoll_mask;
+ bool aid0_bit_set;
+ spinlock_t ps_state_lock; /* Protect power save state */
+ bool buffered_multicasts;
+ bool tx_multicast;
+ struct work_struct set_tim_work;
+ struct work_struct set_cts_work;
+ struct work_struct multicast_start_work;
+ struct work_struct multicast_stop_work;
+ struct timer_list mcast_timeout;
+
+ /* WSM events and CQM implementation */
+ spinlock_t event_queue_lock; /* Protect event queue */
+ struct list_head event_queue;
+ struct work_struct event_handler;
+
+ struct delayed_work bss_loss_work;
+ spinlock_t bss_loss_lock; /* Protect BSS loss state */
+ int bss_loss_state;
+ u32 bss_loss_confirm_id;
+ int delayed_link_loss;
+ struct work_struct bss_params_work;
+
+ /* TX rate policy cache */
+ struct tx_policy_cache tx_policy_cache;
+ struct work_struct tx_policy_upload_work;
+
+ /* legacy PS mode switch in suspend */
+ int ps_mode_switch_in_progress;
+ wait_queue_head_t ps_mode_switch_done;
+
+ /* Workaround for WFD testcase 6.1.10 */
+ struct work_struct linkid_reset_work;
+ u8 action_frame_sa[ETH_ALEN];
+ u8 action_linkid;
+};
+
+struct cw1200_sta_priv {
+ int link_id;
+};
+
+/* interfaces for the drivers */
+int cw1200_core_probe(const struct hwbus_ops *hwbus_ops,
+ struct hwbus_priv *hwbus,
+ struct device *pdev,
+ struct cw1200_common **pself,
+ int ref_clk, const u8 *macaddr,
+ const char *sdd_path, bool have_5ghz);
+void cw1200_core_release(struct cw1200_common *self);
+
+#define FWLOAD_BLOCK_SIZE (1024)
+
+static inline int cw1200_is_ht(const struct cw1200_ht_info *ht_info)
+{
+ return ht_info->channel_type != NL80211_CHAN_NO_HT;
+}
+
+static inline int cw1200_ht_greenfield(const struct cw1200_ht_info *ht_info)
+{
+ return cw1200_is_ht(ht_info) &&
+ (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ht_info->operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+}
+
+static inline int cw1200_ht_ampdu_density(const struct cw1200_ht_info *ht_info)
+{
+ if (!cw1200_is_ht(ht_info))
+ return 0;
+ return ht_info->ht_cap.ampdu_density;
+}
+
+#endif /* CW1200_H */
diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c
new file mode 100644
index 000000000000..ebdcdf44f155
--- /dev/null
+++ b/drivers/net/wireless/cw1200/cw1200_sdio.c
@@ -0,0 +1,425 @@
+/*
+ * Mac80211 SDIO driver for ST-Ericsson CW1200 device
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <net/mac80211.h>
+
+#include "cw1200.h"
+#include "hwbus.h"
+#include <linux/platform_data/net-cw1200.h>
+#include "hwio.h"
+
+MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>");
+MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SDIO driver");
+MODULE_LICENSE("GPL");
+
+#define SDIO_BLOCK_SIZE (512)
+
+/* Default platform data for Sagrad modules */
+static struct cw1200_platform_data_sdio sagrad_109x_evk_platform_data = {
+ .ref_clk = 38400,
+ .have_5ghz = false,
+ .sdd_file = "sdd_sagrad_1091_1098.bin",
+};
+
+/* Allow platform data to be overridden */
+static struct cw1200_platform_data_sdio *global_plat_data = &sagrad_109x_evk_platform_data;
+
+void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata)
+{
+ global_plat_data = pdata;
+}
+
+struct hwbus_priv {
+ struct sdio_func *func;
+ struct cw1200_common *core;
+ const struct cw1200_platform_data_sdio *pdata;
+};
+
+#ifndef SDIO_VENDOR_ID_STE
+#define SDIO_VENDOR_ID_STE 0x0020
+#endif
+
+#ifndef SDIO_DEVICE_ID_STE_CW1200
+#define SDIO_DEVICE_ID_STE_CW1200 0x2280
+#endif
+
+static const struct sdio_device_id cw1200_sdio_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
+ { /* end: all zeroes */ },
+};
+
+/* hwbus_ops implementation */
+
+static int cw1200_sdio_memcpy_fromio(struct hwbus_priv *self,
+ unsigned int addr,
+ void *dst, int count)
+{
+ return sdio_memcpy_fromio(self->func, dst, addr, count);
+}
+
+static int cw1200_sdio_memcpy_toio(struct hwbus_priv *self,
+ unsigned int addr,
+ const void *src, int count)
+{
+ return sdio_memcpy_toio(self->func, addr, (void *)src, count);
+}
+
+static void cw1200_sdio_lock(struct hwbus_priv *self)
+{
+ sdio_claim_host(self->func);
+}
+
+static void cw1200_sdio_unlock(struct hwbus_priv *self)
+{
+ sdio_release_host(self->func);
+}
+
+static void cw1200_sdio_irq_handler(struct sdio_func *func)
+{
+ struct hwbus_priv *self = sdio_get_drvdata(func);
+
+ /* note: sdio_host already claimed here. */
+ if (self->core)
+ cw1200_irq_handler(self->core);
+}
+
+static irqreturn_t cw1200_gpio_hardirq(int irq, void *dev_id)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id)
+{
+ struct hwbus_priv *self = dev_id;
+
+ if (self->core) {
+ sdio_claim_host(self->func);
+ cw1200_irq_handler(self->core);
+ sdio_release_host(self->func);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
+}
+
+static int cw1200_request_irq(struct hwbus_priv *self)
+{
+ int ret;
+ u8 cccr;
+
+ cccr = sdio_f0_readb(self->func, SDIO_CCCR_IENx, &ret);
+ if (WARN_ON(ret))
+ goto err;
+
+ /* Master interrupt enable ... */
+ cccr |= BIT(0);
+
+ /* ... for our function */
+ cccr |= BIT(self->func->num);
+
+ sdio_f0_writeb(self->func, cccr, SDIO_CCCR_IENx, &ret);
+ if (WARN_ON(ret))
+ goto err;
+
+ ret = enable_irq_wake(self->pdata->irq);
+ if (WARN_ON(ret))
+ goto err;
+
+ /* Request the IRQ */
+ ret = request_threaded_irq(self->pdata->irq, cw1200_gpio_hardirq,
+ cw1200_gpio_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "cw1200_wlan_irq", self);
+ if (WARN_ON(ret))
+ goto err;
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static int cw1200_sdio_irq_subscribe(struct hwbus_priv *self)
+{
+ int ret = 0;
+
+ pr_debug("SW IRQ subscribe\n");
+ sdio_claim_host(self->func);
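+ /* Use the out-of-band GPIO interrupt when the platform provides
+ * one; otherwise fall back to the standard in-band SDIO interrupt.
+ */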
+ if (self->pdata->irq)
+ ret = cw1200_request_irq(self);
+ else
+ ret = sdio_claim_irq(self->func, cw1200_sdio_irq_handler);
+
+ sdio_release_host(self->func);
+ return ret;
+}
+
+static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self)
+{
+ int ret = 0;
+
+ pr_debug("SW IRQ unsubscribe\n");
+
+ if (self->pdata->irq) {
+ disable_irq_wake(self->pdata->irq);
+ free_irq(self->pdata->irq, self);
+ } else {
+ sdio_claim_host(self->func);
+ ret = sdio_release_irq(self->func);
+ sdio_release_host(self->func);
+ }
+ return ret;
+}
+
+static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
+{
+ if (pdata->reset) {
+ gpio_set_value(pdata->reset, 0);
+ msleep(30); /* Min is 2 * CLK32K cycles */
+ gpio_free(pdata->reset);
+ }
+
+ if (pdata->power_ctrl)
+ pdata->power_ctrl(pdata, false);
+ if (pdata->clk_ctrl)
+ pdata->clk_ctrl(pdata, false);
+
+ return 0;
+}
+
+static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
+{
+ /* Ensure I/Os are pulled low */
+ if (pdata->reset) {
+ gpio_request(pdata->reset, "cw1200_wlan_reset");
+ gpio_direction_output(pdata->reset, 0);
+ }
+ if (pdata->powerup) {
+ gpio_request(pdata->powerup, "cw1200_wlan_powerup");
+ gpio_direction_output(pdata->powerup, 0);
+ }
+ if (pdata->reset || pdata->powerup)
+ msleep(10); /* Settle time? */
+
+ /* Enable 3v3 and 1v8 to hardware */
+ if (pdata->power_ctrl) {
+ if (pdata->power_ctrl(pdata, true)) {
+ pr_err("power_ctrl() failed!\n");
+ return -1;
+ }
+ }
+
+ /* Enable CLK32K */
+ if (pdata->clk_ctrl) {
+ if (pdata->clk_ctrl(pdata, true)) {
+ pr_err("clk_ctrl() failed!\n");
+ return -1;
+ }
+ msleep(10); /* Delay until clock is stable for 2 cycles */
+ }
+
+ /* Enable POWERUP signal */
+ if (pdata->powerup) {
+ gpio_set_value(pdata->powerup, 1);
+ msleep(250); /* or more..? */
+ }
+ /* Enable RSTn signal */
+ if (pdata->reset) {
+ gpio_set_value(pdata->reset, 1);
+ msleep(50); /* Or more..? */
+ }
+ return 0;
+}
+
+static size_t cw1200_sdio_align_size(struct hwbus_priv *self, size_t size)
+{
+ if (self->pdata->no_nptb)
+ size = round_up(size, SDIO_BLOCK_SIZE);
+ else
+ size = sdio_align_size(self->func, size);
+
+ return size;
+}
+
+static int cw1200_sdio_pm(struct hwbus_priv *self, bool suspend)
+{
+ int ret = 0;
+
+ if (self->pdata->irq)
+ ret = irq_set_irq_wake(self->pdata->irq, suspend);
+ return ret;
+}
+
+static struct hwbus_ops cw1200_sdio_hwbus_ops = {
+ .hwbus_memcpy_fromio = cw1200_sdio_memcpy_fromio,
+ .hwbus_memcpy_toio = cw1200_sdio_memcpy_toio,
+ .lock = cw1200_sdio_lock,
+ .unlock = cw1200_sdio_unlock,
+ .align_size = cw1200_sdio_align_size,
+ .power_mgmt = cw1200_sdio_pm,
+};
+
+/* Probe Function to be called by SDIO stack when device is discovered */
+static int cw1200_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct hwbus_priv *self;
+ int status;
+
+ pr_info("cw1200_wlan_sdio: Probe called\n");
+
+ /* We are only able to handle the wlan function */
+ if (func->num != 0x01)
+ return -ENODEV;
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ pr_err("Can't allocate SDIO hwbus_priv.\n");
+ return -ENOMEM;
+ }
+
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
+ self->pdata = global_plat_data; /* FIXME */
+ self->func = func;
+ sdio_set_drvdata(func, self);
+ sdio_claim_host(func);
+ sdio_enable_func(func);
+ sdio_release_host(func);
+
+ status = cw1200_sdio_irq_subscribe(self);
+
+ status = cw1200_core_probe(&cw1200_sdio_hwbus_ops,
+ self, &func->dev, &self->core,
+ self->pdata->ref_clk,
+ self->pdata->macaddr,
+ self->pdata->sdd_file,
+ self->pdata->have_5ghz);
+ if (status) {
+ cw1200_sdio_irq_unsubscribe(self);
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ sdio_set_drvdata(func, NULL);
+ kfree(self);
+ }
+
+ return status;
+}
+
+/* Disconnect Function to be called by SDIO stack when
+ * device is disconnected
+ */
+static void cw1200_sdio_disconnect(struct sdio_func *func)
+{
+ struct hwbus_priv *self = sdio_get_drvdata(func);
+
+ if (self) {
+ cw1200_sdio_irq_unsubscribe(self);
+ if (self->core) {
+ cw1200_core_release(self->core);
+ self->core = NULL;
+ }
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ sdio_set_drvdata(func, NULL);
+ kfree(self);
+ }
+}
+
+#ifdef CONFIG_PM
+static int cw1200_sdio_suspend(struct device *dev)
+{
+ int ret;
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct hwbus_priv *self = sdio_get_drvdata(func);
+
+ if (!cw1200_can_suspend(self->core))
+ return -EAGAIN;
+
+ /* Notify SDIO that CW1200 will remain powered during suspend */
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret)
+ pr_err("Error setting SDIO pm flags: %i\n", ret);
+
+ return ret;
+}
+
+static int cw1200_sdio_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops cw1200_pm_ops = {
+ .suspend = cw1200_sdio_suspend,
+ .resume = cw1200_sdio_resume,
+};
+#endif
+
+static struct sdio_driver sdio_driver = {
+ .name = "cw1200_wlan_sdio",
+ .id_table = cw1200_sdio_ids,
+ .probe = cw1200_sdio_probe,
+ .remove = cw1200_sdio_disconnect,
+#ifdef CONFIG_PM
+ .drv = {
+ .pm = &cw1200_pm_ops,
+ }
+#endif
+};
+
+/* Init Module function -> Called by insmod */
+static int __init cw1200_sdio_init(void)
+{
+ const struct cw1200_platform_data_sdio *pdata;
+ int ret;
+
+ /* FIXME -- this won't support multiple devices */
+ pdata = global_plat_data;
+
+ if (cw1200_sdio_on(pdata)) {
+ ret = -1;
+ goto err;
+ }
+
+ ret = sdio_register_driver(&sdio_driver);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ cw1200_sdio_off(pdata);
+ return ret;
+}
+
+/* Called at Driver Unloading */
+static void __exit cw1200_sdio_exit(void)
+{
+ const struct cw1200_platform_data_sdio *pdata;
+
+ /* FIXME -- this won't support multiple devices */
+ pdata = global_plat_data;
+ sdio_unregister_driver(&sdio_driver);
+ cw1200_sdio_off(pdata);
+}
+
+
+module_init(cw1200_sdio_init);
+module_exit(cw1200_sdio_exit);
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
new file mode 100644
index 000000000000..d06376014bcd
--- /dev/null
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -0,0 +1,471 @@
+/*
+ * Mac80211 SPI driver for ST-Ericsson CW1200 device
+ *
+ * Copyright (c) 2011, Sagrad Inc.
+ * Author: Solomon Peachy <speachy@sagrad.com>
+ *
+ * Based on cw1200_sdio.c
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <net/mac80211.h>
+
+#include <linux/spi/spi.h>
+#include <linux/device.h>
+
+#include "cw1200.h"
+#include "hwbus.h"
+#include <linux/platform_data/net-cw1200.h>
+#include "hwio.h"
+
+MODULE_AUTHOR("Solomon Peachy <speachy@sagrad.com>");
+MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SPI driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:cw1200_wlan_spi");
+
+/* #define SPI_DEBUG */
+
+struct hwbus_priv {
+ struct spi_device *func;
+ struct cw1200_common *core;
+ const struct cw1200_platform_data_spi *pdata;
+ spinlock_t lock; /* Serialize all bus operations */
+ int claimed;
+};
+
+#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
+#define SET_WRITE 0x7FFF /* usage: and operation */
+#define SET_READ 0x8000 /* usage: or operation */
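+
+/* The 16-bit transfer header constructed below is laid out as:
+ * bit 15     - read (1) / write (0)
+ * bits 14:12 - SPI register address
+ * bits 11:0  - transfer length in 16-bit words
+ */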
+
+/* Notes on byte ordering:
+ LE: B0 B1 B2 B3
+ BE: B3 B2 B1 B0
+
+ Hardware expects 32-bit data to be written as 16-bit BE words:
+
+ B1 B0 B3 B2
+*/
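+
+/* Illustrative example: the 32-bit value 0x44332211, stored LE as
+   11 22 33 44, must appear on the bus as 22 11 44 33, i.e. swab16()
+   applied to each 16-bit word.
+*/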
+
+static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
+ unsigned int addr,
+ void *dst, int count)
+{
+ int ret, i;
+ u16 regaddr;
+ struct spi_message m;
+
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .rx_buf = dst,
+ .len = count,
+ };
+
+ regaddr = (SDIO_TO_SPI_ADDR(addr))<<12;
+ regaddr |= SET_READ;
+ regaddr |= (count>>1);
+
+#ifdef SPI_DEBUG
+ pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr, regaddr);
+#endif
+
+ /* Header is LE16 */
+ regaddr = cpu_to_le16(regaddr);
+
+ /* We have to byteswap if the SPI bus is limited to 8b operation
+ or we are running on a Big Endian system
+ */
+#if defined(__LITTLE_ENDIAN)
+ if (self->func->bits_per_word == 8)
+#endif
+ regaddr = swab16(regaddr);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ ret = spi_sync(self->func, &m);
+
+#ifdef SPI_DEBUG
+ pr_info("READ : ");
+ for (i = 0; i < t_addr.len; i++)
+ printk("%02x ", ((u8 *)t_addr.tx_buf)[i]);
+ printk(" : ");
+ for (i = 0; i < t_msg.len; i++)
+ printk("%02x ", ((u8 *)t_msg.rx_buf)[i]);
+ printk("\n");
+#endif
+
+ /* We have to byteswap if the SPI bus is limited to 8b operation
+ or we are running on a Big Endian system
+ */
+#if defined(__LITTLE_ENDIAN)
+ if (self->func->bits_per_word == 8)
+#endif
+ {
+ uint16_t *buf = (uint16_t *)dst;
+ for (i = 0; i < ((count + 1) >> 1); i++)
+ buf[i] = swab16(buf[i]);
+ }
+
+ return ret;
+}
+
+static int cw1200_spi_memcpy_toio(struct hwbus_priv *self,
+ unsigned int addr,
+ const void *src, int count)
+{
+ int rval, i;
+ u16 regaddr;
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .tx_buf = src,
+ .len = count,
+ };
+ struct spi_message m;
+
+ regaddr = (SDIO_TO_SPI_ADDR(addr))<<12;
+ regaddr &= SET_WRITE;
+ regaddr |= (count>>1);
+
+#ifdef SPI_DEBUG
+ pr_info("WRITE: %04d to 0x%02x (%04x)\n", count, addr, regaddr);
+#endif
+
+ /* Header is LE16 */
+ regaddr = cpu_to_le16(regaddr);
+
+ /* We have to byteswap if the SPI bus is limited to 8b operation
+ or we are running on a Big Endian system
+ */
+#if defined(__LITTLE_ENDIAN)
+ if (self->func->bits_per_word == 8)
+#endif
+ {
+ uint16_t *buf = (uint16_t *)src;
+ regaddr = swab16(regaddr);
+ for (i = 0; i < ((count + 1) >> 1); i++)
+ buf[i] = swab16(buf[i]);
+ }
+
+#ifdef SPI_DEBUG
+ pr_info("WRITE: ");
+ for (i = 0; i < t_addr.len; i++)
+ printk("%02x ", ((u8 *)t_addr.tx_buf)[i]);
+ printk(" : ");
+ for (i = 0; i < t_msg.len; i++)
+ printk("%02x ", ((u8 *)t_msg.tx_buf)[i]);
+ printk("\n");
+#endif
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ rval = spi_sync(self->func, &m);
+
+#ifdef SPI_DEBUG
+ pr_info("WROTE: %d\n", m.actual_length);
+#endif
+
+#if defined(__LITTLE_ENDIAN)
+ /* We have to byteswap if the SPI bus is limited to 8b operation */
+ if (self->func->bits_per_word == 8)
+#endif
+ {
+ uint16_t *buf = (uint16_t *)src;
+ for (i = 0; i < ((count + 1) >> 1); i++)
+ buf[i] = swab16(buf[i]);
+ }
+ return rval;
+}
+
+static void cw1200_spi_lock(struct hwbus_priv *self)
+{
+ unsigned long flags;
+
+ might_sleep();
+
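+ /* Simple sleeping lock: keep rescheduling until the bus is no
+ * longer claimed, then mark it claimed; this mirrors what
+ * sdio_claim_host() provides on the SDIO variant.
+ */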
+ spin_lock_irqsave(&self->lock, flags);
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!self->claimed)
+ break;
+ spin_unlock_irqrestore(&self->lock, flags);
+ schedule();
+ spin_lock_irqsave(&self->lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ self->claimed = 1;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return;
+}
+
+static void cw1200_spi_unlock(struct hwbus_priv *self)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->claimed = 0;
+ spin_unlock_irqrestore(&self->lock, flags);
+ return;
+}
+
+static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
+{
+ struct hwbus_priv *self = dev_id;
+
+ if (self->core) {
+ cw1200_irq_handler(self->core);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
+}
+
+static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
+{
+ int ret;
+
+ pr_debug("SW IRQ subscribe\n");
+
+ ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler,
+ IRQF_TRIGGER_HIGH,
+ "cw1200_wlan_irq", self);
+ if (WARN_ON(ret < 0))
+ goto exit;
+
+ ret = enable_irq_wake(self->func->irq);
+ if (WARN_ON(ret))
+ goto free_irq;
+
+ return 0;
+
+free_irq:
+ free_irq(self->func->irq, self);
+exit:
+ return ret;
+}
+
+static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
+{
+ int ret = 0;
+
+ pr_debug("SW IRQ unsubscribe\n");
+ disable_irq_wake(self->func->irq);
+ free_irq(self->func->irq, self);
+
+ return ret;
+}
+
+static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
+{
+ if (pdata->reset) {
+ gpio_set_value(pdata->reset, 0);
+ msleep(30); /* Min is 2 * CLK32K cycles */
+ gpio_free(pdata->reset);
+ }
+
+ if (pdata->power_ctrl)
+ pdata->power_ctrl(pdata, false);
+ if (pdata->clk_ctrl)
+ pdata->clk_ctrl(pdata, false);
+
+ return 0;
+}
+
+static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
+{
+ /* Ensure I/Os are pulled low */
+ if (pdata->reset) {
+ gpio_request(pdata->reset, "cw1200_wlan_reset");
+ gpio_direction_output(pdata->reset, 0);
+ }
+ if (pdata->powerup) {
+ gpio_request(pdata->powerup, "cw1200_wlan_powerup");
+ gpio_direction_output(pdata->powerup, 0);
+ }
+ if (pdata->reset || pdata->powerup)
+ msleep(10); /* Settle time? */
+
+ /* Enable 3v3 and 1v8 to hardware */
+ if (pdata->power_ctrl) {
+ if (pdata->power_ctrl(pdata, true)) {
+ pr_err("power_ctrl() failed!\n");
+ return -1;
+ }
+ }
+
+ /* Enable CLK32K */
+ if (pdata->clk_ctrl) {
+ if (pdata->clk_ctrl(pdata, true)) {
+ pr_err("clk_ctrl() failed!\n");
+ return -1;
+ }
+ msleep(10); /* Delay until clock is stable for 2 cycles */
+ }
+
+ /* Enable POWERUP signal */
+ if (pdata->powerup) {
+ gpio_set_value(pdata->powerup, 1);
+ msleep(250); /* or more..? */
+ }
+ /* Enable RSTn signal */
+ if (pdata->reset) {
+ gpio_set_value(pdata->reset, 1);
+ msleep(50); /* Or more..? */
+ }
+ return 0;
+}
+
+static size_t cw1200_spi_align_size(struct hwbus_priv *self, size_t size)
+{
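+ /* Round odd lengths up to an even number of bytes, since the SPI
+ * header expresses the transfer length in 16-bit words.
+ */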
+ return size & 1 ? size + 1 : size;
+}
+
+static int cw1200_spi_pm(struct hwbus_priv *self, bool suspend)
+{
+ return irq_set_irq_wake(self->func->irq, suspend);
+}
+
+static struct hwbus_ops cw1200_spi_hwbus_ops = {
+ .hwbus_memcpy_fromio = cw1200_spi_memcpy_fromio,
+ .hwbus_memcpy_toio = cw1200_spi_memcpy_toio,
+ .lock = cw1200_spi_lock,
+ .unlock = cw1200_spi_unlock,
+ .align_size = cw1200_spi_align_size,
+ .power_mgmt = cw1200_spi_pm,
+};
+
+/* Probe Function to be called by SPI stack when device is discovered */
+static int cw1200_spi_probe(struct spi_device *func)
+{
+ const struct cw1200_platform_data_spi *plat_data =
+ func->dev.platform_data;
+ struct hwbus_priv *self;
+ int status;
+
+ /* Sanity check speed */
+ if (func->max_speed_hz > 52000000)
+ func->max_speed_hz = 52000000;
+ if (func->max_speed_hz < 1000000)
+ func->max_speed_hz = 1000000;
+
+ /* Fix up transfer size */
+ if (plat_data->spi_bits_per_word)
+ func->bits_per_word = plat_data->spi_bits_per_word;
+ if (!func->bits_per_word)
+ func->bits_per_word = 16;
+
+ /* And finally.. */
+ func->mode = SPI_MODE_0;
+
+ pr_info("cw1200_wlan_spi: Probe called (CS %d M %d BPW %d CLK %d)\n",
+ func->chip_select, func->mode, func->bits_per_word,
+ func->max_speed_hz);
+
+ if (cw1200_spi_on(plat_data)) {
+ pr_err("spi_on() failed!\n");
+ return -1;
+ }
+
+ if (spi_setup(func)) {
+ pr_err("spi_setup() failed!\n");
+ return -1;
+ }
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ pr_err("Can't allocate SPI hwbus_priv.");
+ return -ENOMEM;
+ }
+
+ self->pdata = plat_data;
+ self->func = func;
+ spin_lock_init(&self->lock);
+
+ spi_set_drvdata(func, self);
+
+ status = cw1200_spi_irq_subscribe(self);
+
+ status = cw1200_core_probe(&cw1200_spi_hwbus_ops,
+ self, &func->dev, &self->core,
+ self->pdata->ref_clk,
+ self->pdata->macaddr,
+ self->pdata->sdd_file,
+ self->pdata->have_5ghz);
+
+ if (status) {
+ cw1200_spi_irq_unsubscribe(self);
+ cw1200_spi_off(plat_data);
+ kfree(self);
+ }
+
+ return status;
+}
+
+/* Disconnect Function to be called by SPI stack when device is disconnected */
+static int cw1200_spi_disconnect(struct spi_device *func)
+{
+ struct hwbus_priv *self = spi_get_drvdata(func);
+
+ if (self) {
+ cw1200_spi_irq_unsubscribe(self);
+ if (self->core) {
+ cw1200_core_release(self->core);
+ self->core = NULL;
+ }
+ kfree(self);
+ }
+ cw1200_spi_off(func->dev.platform_data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
+{
+ struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
+
+ if (!cw1200_can_suspend(self->core))
+ return -EAGAIN;
+
+ /* XXX notify host that we have to keep CW1200 powered on? */
+ return 0;
+}
+
+static int cw1200_spi_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static struct spi_driver spi_driver = {
+ .probe = cw1200_spi_probe,
+ .remove = cw1200_spi_disconnect,
+ .driver = {
+ .name = "cw1200_wlan_spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .suspend = cw1200_spi_suspend,
+ .resume = cw1200_spi_resume,
+#endif
+ },
+};
+
+module_spi_driver(spi_driver);
diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c
new file mode 100644
index 000000000000..e323b4d54338
--- /dev/null
+++ b/drivers/net/wireless/cw1200/debug.c
@@ -0,0 +1,428 @@
+/*
+ * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers
+ * DebugFS code
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "cw1200.h"
+#include "debug.h"
+#include "fwio.h"
+
+/* join_status */
+static const char * const cw1200_debug_join_status[] = {
+ "passive",
+ "monitor",
+ "station (joining)",
+ "station (not authenticated yet)",
+ "station",
+ "adhoc",
+ "access point",
+};
+
+/* WSM_JOIN_PREAMBLE_... */
+static const char * const cw1200_debug_preamble[] = {
+ "long",
+ "short",
+ "long on 1 and 2 Mbps",
+};
+
+
+static const char * const cw1200_debug_link_id[] = {
+ "OFF",
+ "REQ",
+ "SOFT",
+ "HARD",
+};
+
+static const char *cw1200_debug_mode(int mode)
+{
+ switch (mode) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ return "unspecified";
+ case NL80211_IFTYPE_MONITOR:
+ return "monitor";
+ case NL80211_IFTYPE_STATION:
+ return "station";
+ case NL80211_IFTYPE_ADHOC:
+ return "adhoc";
+ case NL80211_IFTYPE_MESH_POINT:
+ return "mesh point";
+ case NL80211_IFTYPE_AP:
+ return "access point";
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return "p2p client";
+ case NL80211_IFTYPE_P2P_GO:
+ return "p2p go";
+ default:
+ return "unsupported";
+ }
+}
+
+static void cw1200_queue_status_show(struct seq_file *seq,
+ struct cw1200_queue *q)
+{
+ int i;
+ seq_printf(seq, "Queue %d:\n", q->queue_id);
+ seq_printf(seq, " capacity: %zu\n", q->capacity);
+ seq_printf(seq, " queued: %zu\n", q->num_queued);
+ seq_printf(seq, " pending: %zu\n", q->num_pending);
+ seq_printf(seq, " sent: %zu\n", q->num_sent);
+ seq_printf(seq, " locked: %s\n", q->tx_locked_cnt ? "yes" : "no");
+ seq_printf(seq, " overfull: %s\n", q->overfull ? "yes" : "no");
+ seq_puts(seq, " link map: 0-> ");
+ for (i = 0; i < q->stats->map_capacity; ++i)
+ seq_printf(seq, "%.2d ", q->link_map_cache[i]);
+ seq_printf(seq, "<-%zu\n", q->stats->map_capacity);
+}
+
+static void cw1200_debug_print_map(struct seq_file *seq,
+ struct cw1200_common *priv,
+ const char *label,
+ u32 map)
+{
+ int i;
+ seq_printf(seq, "%s0-> ", label);
+ for (i = 0; i < priv->tx_queue_stats.map_capacity; ++i)
+ seq_printf(seq, "%s ", (map & BIT(i)) ? "**" : "..");
+ seq_printf(seq, "<-%zu\n", priv->tx_queue_stats.map_capacity - 1);
+}
+
+static int cw1200_status_show(struct seq_file *seq, void *v)
+{
+ int i;
+ struct list_head *item;
+ struct cw1200_common *priv = seq->private;
+ struct cw1200_debug_priv *d = priv->debug;
+
+ seq_puts(seq, "CW1200 Wireless LAN driver status\n");
+ seq_printf(seq, "Hardware: %d.%d\n",
+ priv->wsm_caps.hw_id,
+ priv->wsm_caps.hw_subid);
+ seq_printf(seq, "Firmware: %s %d.%d\n",
+ cw1200_fw_types[priv->wsm_caps.fw_type],
+ priv->wsm_caps.fw_ver,
+ priv->wsm_caps.fw_build);
+ seq_printf(seq, "FW API: %d\n",
+ priv->wsm_caps.fw_api);
+ seq_printf(seq, "FW caps: 0x%.4X\n",
+ priv->wsm_caps.fw_cap);
+ seq_printf(seq, "FW label: '%s'\n",
+ priv->wsm_caps.fw_label);
+ seq_printf(seq, "Mode: %s%s\n",
+ cw1200_debug_mode(priv->mode),
+ priv->listening ? " (listening)" : "");
+ seq_printf(seq, "Join state: %s\n",
+ cw1200_debug_join_status[priv->join_status]);
+ if (priv->channel)
+ seq_printf(seq, "Channel: %d%s\n",
+ priv->channel->hw_value,
+ priv->channel_switch_in_progress ?
+ " (switching)" : "");
+ if (priv->rx_filter.promiscuous)
+ seq_puts(seq, "Filter: promisc\n");
+ else if (priv->rx_filter.fcs)
+ seq_puts(seq, "Filter: fcs\n");
+ if (priv->rx_filter.bssid)
+ seq_puts(seq, "Filter: bssid\n");
+ if (!priv->disable_beacon_filter)
+ seq_puts(seq, "Filter: beacons\n");
+
+ if (priv->enable_beacon ||
+ priv->mode == NL80211_IFTYPE_AP ||
+ priv->mode == NL80211_IFTYPE_ADHOC ||
+ priv->mode == NL80211_IFTYPE_MESH_POINT ||
+ priv->mode == NL80211_IFTYPE_P2P_GO)
+ seq_printf(seq, "Beaconing: %s\n",
+ priv->enable_beacon ?
+ "enabled" : "disabled");
+
+ for (i = 0; i < 4; ++i)
+ seq_printf(seq, "EDCA(%d): %d, %d, %d, %d, %d\n", i,
+ priv->edca.params[i].cwmin,
+ priv->edca.params[i].cwmax,
+ priv->edca.params[i].aifns,
+ priv->edca.params[i].txop_limit,
+ priv->edca.params[i].max_rx_lifetime);
+
+ if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+ static const char *pm_mode = "unknown";
+ switch (priv->powersave_mode.mode) {
+ case WSM_PSM_ACTIVE:
+ pm_mode = "off";
+ break;
+ case WSM_PSM_PS:
+ pm_mode = "on";
+ break;
+ case WSM_PSM_FAST_PS:
+ pm_mode = "dynamic";
+ break;
+ }
+ seq_printf(seq, "Preamble: %s\n",
+ cw1200_debug_preamble[priv->association_mode.preamble]);
+ seq_printf(seq, "AMPDU spcn: %d\n",
+ priv->association_mode.mpdu_start_spacing);
+ seq_printf(seq, "Basic rate: 0x%.8X\n",
+ le32_to_cpu(priv->association_mode.basic_rate_set));
+ seq_printf(seq, "Bss lost: %d beacons\n",
+ priv->bss_params.beacon_lost_count);
+ seq_printf(seq, "AID: %d\n",
+ priv->bss_params.aid);
+ seq_printf(seq, "Rates: 0x%.8X\n",
+ priv->bss_params.operational_rate_set);
+ seq_printf(seq, "Powersave: %s\n", pm_mode);
+ }
+ seq_printf(seq, "HT: %s\n",
+ cw1200_is_ht(&priv->ht_info) ? "on" : "off");
+ if (cw1200_is_ht(&priv->ht_info)) {
+ seq_printf(seq, "Greenfield: %s\n",
+ cw1200_ht_greenfield(&priv->ht_info) ? "yes" : "no");
+ seq_printf(seq, "AMPDU dens: %d\n",
+ cw1200_ht_ampdu_density(&priv->ht_info));
+ }
+ seq_printf(seq, "RSSI thold: %d\n",
+ priv->cqm_rssi_thold);
+ seq_printf(seq, "RSSI hyst: %d\n",
+ priv->cqm_rssi_hyst);
+ seq_printf(seq, "Long retr: %d\n",
+ priv->long_frame_max_tx_count);
+ seq_printf(seq, "Short retr: %d\n",
+ priv->short_frame_max_tx_count);
+ spin_lock_bh(&priv->tx_policy_cache.lock);
+ i = 0;
+ list_for_each(item, &priv->tx_policy_cache.used)
+ ++i;
+ spin_unlock_bh(&priv->tx_policy_cache.lock);
+ seq_printf(seq, "RC in use: %d\n", i);
+
+ seq_puts(seq, "\n");
+ for (i = 0; i < 4; ++i) {
+ cw1200_queue_status_show(seq, &priv->tx_queue[i]);
+ seq_puts(seq, "\n");
+ }
+
+ cw1200_debug_print_map(seq, priv, "Link map: ",
+ priv->link_id_map);
+ cw1200_debug_print_map(seq, priv, "Asleep map: ",
+ priv->sta_asleep_mask);
+ cw1200_debug_print_map(seq, priv, "PSPOLL map: ",
+ priv->pspoll_mask);
+
+ seq_puts(seq, "\n");
+
+ for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+ if (priv->link_id_db[i].status) {
+ seq_printf(seq, "Link %d: %s, %pM\n",
+ i + 1,
+ cw1200_debug_link_id[priv->link_id_db[i].status],
+ priv->link_id_db[i].mac);
+ }
+ }
+
+ seq_puts(seq, "\n");
+
+ seq_printf(seq, "BH status: %s\n",
+ atomic_read(&priv->bh_term) ? "terminated" : "alive");
+ seq_printf(seq, "Pending RX: %d\n",
+ atomic_read(&priv->bh_rx));
+ seq_printf(seq, "Pending TX: %d\n",
+ atomic_read(&priv->bh_tx));
+ if (priv->bh_error)
+ seq_printf(seq, "BH errcode: %d\n",
+ priv->bh_error);
+ seq_printf(seq, "TX bufs: %d x %d bytes\n",
+ priv->wsm_caps.input_buffers,
+ priv->wsm_caps.input_buffer_size);
+ seq_printf(seq, "Used bufs: %d\n",
+ priv->hw_bufs_used);
+ seq_printf(seq, "Powermgmt: %s\n",
+ priv->powersave_enabled ? "on" : "off");
+ seq_printf(seq, "Device: %s\n",
+ priv->device_can_sleep ? "asleep" : "awake");
+
+ spin_lock(&priv->wsm_cmd.lock);
+ seq_printf(seq, "WSM status: %s\n",
+ priv->wsm_cmd.done ? "idle" : "active");
+ seq_printf(seq, "WSM cmd: 0x%.4X (%td bytes)\n",
+ priv->wsm_cmd.cmd, priv->wsm_cmd.len);
+ seq_printf(seq, "WSM retval: %d\n",
+ priv->wsm_cmd.ret);
+ spin_unlock(&priv->wsm_cmd.lock);
+
+ seq_printf(seq, "Datapath: %s\n",
+ atomic_read(&priv->tx_lock) ? "locked" : "unlocked");
+ if (atomic_read(&priv->tx_lock))
+ seq_printf(seq, "TXlock cnt: %d\n",
+ atomic_read(&priv->tx_lock));
+
+ seq_printf(seq, "TXed: %d\n",
+ d->tx);
+ seq_printf(seq, "AGG TXed: %d\n",
+ d->tx_agg);
+ seq_printf(seq, "MULTI TXed: %d (%d)\n",
+ d->tx_multi, d->tx_multi_frames);
+ seq_printf(seq, "RXed: %d\n",
+ d->rx);
+ seq_printf(seq, "AGG RXed: %d\n",
+ d->rx_agg);
+ seq_printf(seq, "TX miss: %d\n",
+ d->tx_cache_miss);
+ seq_printf(seq, "TX align: %d\n",
+ d->tx_align);
+ seq_printf(seq, "TX burst: %d\n",
+ d->tx_burst);
+ seq_printf(seq, "TX TTL: %d\n",
+ d->tx_ttl);
+ seq_printf(seq, "Scan: %s\n",
+ atomic_read(&priv->scan.in_progress) ? "active" : "idle");
+
+ return 0;
+}
+
+static int cw1200_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, &cw1200_status_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_status = {
+ .open = cw1200_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int cw1200_counters_show(struct seq_file *seq, void *v)
+{
+ int ret;
+ struct cw1200_common *priv = seq->private;
+ struct wsm_mib_counters_table counters;
+
+ ret = wsm_get_counters_table(priv, &counters);
+ if (ret)
+ return ret;
+
+#define PUT_COUNTER(tab, name) \
+ seq_printf(seq, "%s:" tab "%d\n", #name, \
+ __le32_to_cpu(counters.name))
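+/* e.g. PUT_COUNTER("\t\t", plcp_errors) prints "plcp_errors:" followed by
+ * two tabs and the little-endian-converted counter value.
+ */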
+
+ PUT_COUNTER("\t\t", plcp_errors);
+ PUT_COUNTER("\t\t", fcs_errors);
+ PUT_COUNTER("\t\t", tx_packets);
+ PUT_COUNTER("\t\t", rx_packets);
+ PUT_COUNTER("\t\t", rx_packet_errors);
+ PUT_COUNTER("\t", rx_decryption_failures);
+ PUT_COUNTER("\t\t", rx_mic_failures);
+ PUT_COUNTER("\t", rx_no_key_failures);
+ PUT_COUNTER("\t", tx_multicast_frames);
+ PUT_COUNTER("\t", tx_frames_success);
+ PUT_COUNTER("\t", tx_frame_failures);
+ PUT_COUNTER("\t", tx_frames_retried);
+ PUT_COUNTER("\t", tx_frames_multi_retried);
+ PUT_COUNTER("\t", rx_frame_duplicates);
+ PUT_COUNTER("\t\t", rts_success);
+ PUT_COUNTER("\t\t", rts_failures);
+ PUT_COUNTER("\t\t", ack_failures);
+ PUT_COUNTER("\t", rx_multicast_frames);
+ PUT_COUNTER("\t", rx_frames_success);
+ PUT_COUNTER("\t", rx_cmac_icv_errors);
+ PUT_COUNTER("\t\t", rx_cmac_replays);
+ PUT_COUNTER("\t", rx_mgmt_ccmp_replays);
+
+#undef PUT_COUNTER
+
+ return 0;
+}
+
+static int cw1200_counters_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, &cw1200_counters_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_counters = {
+ .open = cw1200_counters_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t cw1200_wsm_dumps(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct cw1200_common *priv = file->private_data;
+ char buf[1];
+
+ if (!count)
+ return -EINVAL;
+ if (copy_from_user(buf, user_buf, 1))
+ return -EFAULT;
+
+ if (buf[0] == '1')
+ priv->wsm_enable_wsm_dumps = 1;
+ else
+ priv->wsm_enable_wsm_dumps = 0;
+
+ return count;
+}
+
+static const struct file_operations fops_wsm_dumps = {
+ .open = simple_open,
+ .write = cw1200_wsm_dumps,
+ .llseek = default_llseek,
+};
+
+int cw1200_debug_init(struct cw1200_common *priv)
+{
+ int ret = -ENOMEM;
+ struct cw1200_debug_priv *d = kzalloc(sizeof(struct cw1200_debug_priv),
+ GFP_KERNEL);
+ priv->debug = d;
+ if (!d)
+ return ret;
+
+ d->debugfs_phy = debugfs_create_dir("cw1200",
+ priv->hw->wiphy->debugfsdir);
+ if (!d->debugfs_phy)
+ goto err;
+
+ if (!debugfs_create_file("status", S_IRUSR, d->debugfs_phy,
+ priv, &fops_status))
+ goto err;
+
+ if (!debugfs_create_file("counters", S_IRUSR, d->debugfs_phy,
+ priv, &fops_counters))
+ goto err;
+
+ if (!debugfs_create_file("wsm_dumps", S_IWUSR, d->debugfs_phy,
+ priv, &fops_wsm_dumps))
+ goto err;
+
+ return 0;
+
+err:
+ priv->debug = NULL;
+ debugfs_remove_recursive(d->debugfs_phy);
+ kfree(d);
+ return ret;
+}
+
+void cw1200_debug_release(struct cw1200_common *priv)
+{
+ struct cw1200_debug_priv *d = priv->debug;
+ if (d) {
+ debugfs_remove_recursive(d->debugfs_phy);
+ priv->debug = NULL;
+ kfree(d);
+ }
+}
diff --git a/drivers/net/wireless/cw1200/debug.h b/drivers/net/wireless/cw1200/debug.h
new file mode 100644
index 000000000000..b525aba53bfc
--- /dev/null
+++ b/drivers/net/wireless/cw1200/debug.h
@@ -0,0 +1,93 @@
+/*
+ * DebugFS code for ST-Ericsson CW1200 mac80211 driver
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_DEBUG_H_INCLUDED
+#define CW1200_DEBUG_H_INCLUDED
+
+struct cw1200_debug_priv {
+ struct dentry *debugfs_phy;
+ int tx;
+ int tx_agg;
+ int rx;
+ int rx_agg;
+ int tx_multi;
+ int tx_multi_frames;
+ int tx_cache_miss;
+ int tx_align;
+ int tx_ttl;
+ int tx_burst;
+ int ba_cnt;
+ int ba_acc;
+ int ba_cnt_rx;
+ int ba_acc_rx;
+};
+
+int cw1200_debug_init(struct cw1200_common *priv);
+void cw1200_debug_release(struct cw1200_common *priv);
+
+static inline void cw1200_debug_txed(struct cw1200_common *priv)
+{
+ ++priv->debug->tx;
+}
+
+static inline void cw1200_debug_txed_agg(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_agg;
+}
+
+static inline void cw1200_debug_txed_multi(struct cw1200_common *priv,
+ int count)
+{
+ ++priv->debug->tx_multi;
+ priv->debug->tx_multi_frames += count;
+}
+
+static inline void cw1200_debug_rxed(struct cw1200_common *priv)
+{
+ ++priv->debug->rx;
+}
+
+static inline void cw1200_debug_rxed_agg(struct cw1200_common *priv)
+{
+ ++priv->debug->rx_agg;
+}
+
+static inline void cw1200_debug_tx_cache_miss(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_cache_miss;
+}
+
+static inline void cw1200_debug_tx_align(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_align;
+}
+
+static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_ttl;
+}
+
+static inline void cw1200_debug_tx_burst(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_burst;
+}
+
+static inline void cw1200_debug_ba(struct cw1200_common *priv,
+ int ba_cnt, int ba_acc,
+ int ba_cnt_rx, int ba_acc_rx)
+{
+ priv->debug->ba_cnt = ba_cnt;
+ priv->debug->ba_acc = ba_acc;
+ priv->debug->ba_cnt_rx = ba_cnt_rx;
+ priv->debug->ba_acc_rx = ba_acc_rx;
+}
+
+#endif /* CW1200_DEBUG_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
new file mode 100644
index 000000000000..acdff0f7f952
--- /dev/null
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -0,0 +1,520 @@
+/*
+ * Firmware I/O code for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+
+#include "cw1200.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "hwbus.h"
+#include "bh.h"
+
+static int cw1200_get_hw_type(u32 config_reg_val, int *major_revision)
+{
+ int hw_type = -1;
+ u32 silicon_type = (config_reg_val >> 24) & 0x7;
+ u32 silicon_vers = (config_reg_val >> 31) & 0x1;
+
+ switch (silicon_type) {
+ case 0x00:
+ *major_revision = 1;
+ hw_type = HIF_9000_SILICON_VERSATILE;
+ break;
+ case 0x01:
+ case 0x02: /* CW1x00 */
+ case 0x04: /* CW1x60 */
+ *major_revision = silicon_type;
+ if (silicon_vers)
+ hw_type = HIF_8601_VERSATILE;
+ else
+ hw_type = HIF_8601_SILICON;
+ break;
+ default:
+ break;
+ }
+
+ return hw_type;
+}
+
+static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
+{
+ int ret, block, num_blocks;
+ unsigned i;
+ u32 val32;
+ u32 put = 0, get = 0;
+ u8 *buf = NULL;
+ const char *fw_path;
+ const struct firmware *firmware = NULL;
+
+ /* Macros are local. */
+#define APB_WRITE(reg, val) \
+ do { \
+ ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \
+ if (ret < 0) \
+ goto error; \
+ } while (0)
+#define APB_READ(reg, val) \
+ do { \
+ ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \
+ if (ret < 0) \
+ goto error; \
+ } while (0)
+#define REG_WRITE(reg, val) \
+ do { \
+ ret = cw1200_reg_write_32(priv, (reg), (val)); \
+ if (ret < 0) \
+ goto error; \
+ } while (0)
+#define REG_READ(reg, val) \
+ do { \
+ ret = cw1200_reg_read_32(priv, (reg), &(val)); \
+ if (ret < 0) \
+ goto error; \
+ } while (0)
+
+ switch (priv->hw_revision) {
+ case CW1200_HW_REV_CUT10:
+ fw_path = FIRMWARE_CUT10;
+ if (!priv->sdd_path)
+ priv->sdd_path = SDD_FILE_10;
+ break;
+ case CW1200_HW_REV_CUT11:
+ fw_path = FIRMWARE_CUT11;
+ if (!priv->sdd_path)
+ priv->sdd_path = SDD_FILE_11;
+ break;
+ case CW1200_HW_REV_CUT20:
+ fw_path = FIRMWARE_CUT20;
+ if (!priv->sdd_path)
+ priv->sdd_path = SDD_FILE_20;
+ break;
+ case CW1200_HW_REV_CUT22:
+ fw_path = FIRMWARE_CUT22;
+ if (!priv->sdd_path)
+ priv->sdd_path = SDD_FILE_22;
+ break;
+ case CW1X60_HW_REV:
+ fw_path = FIRMWARE_CW1X60;
+ if (!priv->sdd_path)
+ priv->sdd_path = SDD_FILE_CW1X60;
+ break;
+ default:
+ pr_err("Invalid silicon revision %d.\n", priv->hw_revision);
+ return -EINVAL;
+ }
+
+ /* Initialize common registers */
+ APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, DOWNLOAD_ARE_YOU_HERE);
+ APB_WRITE(DOWNLOAD_PUT_REG, 0);
+ APB_WRITE(DOWNLOAD_GET_REG, 0);
+ APB_WRITE(DOWNLOAD_STATUS_REG, DOWNLOAD_PENDING);
+ APB_WRITE(DOWNLOAD_FLAGS_REG, 0);
+
+ /* Write the NOP Instruction */
+ REG_WRITE(ST90TDS_SRAM_BASE_ADDR_REG_ID, 0xFFF20000);
+ REG_WRITE(ST90TDS_AHB_DPORT_REG_ID, 0xEAFFFFFE);
+
+ /* Release CPU from RESET */
+ REG_READ(ST90TDS_CONFIG_REG_ID, val32);
+ val32 &= ~ST90TDS_CONFIG_CPU_RESET_BIT;
+ REG_WRITE(ST90TDS_CONFIG_REG_ID, val32);
+
+ /* Enable Clock */
+ val32 &= ~ST90TDS_CONFIG_CPU_CLK_DIS_BIT;
+ REG_WRITE(ST90TDS_CONFIG_REG_ID, val32);
+
+ /* Load a firmware file */
+ ret = request_firmware(&firmware, fw_path, priv->pdev);
+ if (ret) {
+ pr_err("Can't load firmware file %s.\n", fw_path);
+ goto error;
+ }
+
+ buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ pr_err("Can't allocate firmware load buffer.\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Check if the bootloader is ready */
+ for (i = 0; i < 100; i += 1 + i / 2) {
+ APB_READ(DOWNLOAD_IMAGE_SIZE_REG, val32);
+ if (val32 == DOWNLOAD_I_AM_HERE)
+ break;
+ mdelay(i);
+ } /* End of for loop */
+
+ if (val32 != DOWNLOAD_I_AM_HERE) {
+ pr_err("Bootloader is not ready.\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Calculate number of download blocks */
+ num_blocks = (firmware->size - 1) / DOWNLOAD_BLOCK_SIZE + 1;
+
+ /* Updating the length in Download Ctrl Area */
+ val32 = firmware->size; /* Explicit cast from size_t to u32 */
+ APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, val32);
+
+ /* Firmware downloading loop */
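+ /* The download area is a ring buffer: the host advances "put" as it
+ * writes blocks, the bootloader advances "get" as it consumes them,
+ * and writes wrap at DOWNLOAD_FIFO_SIZE.
+ */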
+ for (block = 0; block < num_blocks; block++) {
+ size_t tx_size;
+ size_t block_size;
+
+ /* check the download status */
+ APB_READ(DOWNLOAD_STATUS_REG, val32);
+ if (val32 != DOWNLOAD_PENDING) {
+ pr_err("Bootloader reported error %d.\n", val32);
+ ret = -EIO;
+ goto error;
+ }
+
+ /* loop until put - get <= 24K */
+ for (i = 0; i < 100; i++) {
+ APB_READ(DOWNLOAD_GET_REG, get);
+ if ((put - get) <=
+ (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE))
+ break;
+ mdelay(i);
+ }
+
+ if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) {
+ pr_err("Timeout waiting for FIFO.\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* calculate the block size */
+ tx_size = block_size = min((size_t)(firmware->size - put),
+ (size_t)DOWNLOAD_BLOCK_SIZE);
+
+ memcpy(buf, &firmware->data[put], block_size);
+ if (block_size < DOWNLOAD_BLOCK_SIZE) {
+ memset(&buf[block_size], 0,
+ DOWNLOAD_BLOCK_SIZE - block_size);
+ tx_size = DOWNLOAD_BLOCK_SIZE;
+ }
+
+ /* send the block to sram */
+ ret = cw1200_apb_write(priv,
+ CW1200_APB(DOWNLOAD_FIFO_OFFSET +
+ (put & (DOWNLOAD_FIFO_SIZE - 1))),
+ buf, tx_size);
+ if (ret < 0) {
+ pr_err("Can't write firmware block @ %d!\n",
+ put & (DOWNLOAD_FIFO_SIZE - 1));
+ goto error;
+ }
+
+ /* update the put register */
+ put += block_size;
+ APB_WRITE(DOWNLOAD_PUT_REG, put);
+ } /* End of firmware download loop */
+
+ /* Wait for the download completion */
+ for (i = 0; i < 300; i += 1 + i / 2) {
+ APB_READ(DOWNLOAD_STATUS_REG, val32);
+ if (val32 != DOWNLOAD_PENDING)
+ break;
+ mdelay(i);
+ }
+ if (val32 != DOWNLOAD_SUCCESS) {
+ pr_err("Wait for download completion failed: 0x%.8X\n", val32);
+ ret = -ETIMEDOUT;
+ goto error;
+ } else {
+ pr_info("Firmware download completed.\n");
+ ret = 0;
+ }
+
+error:
+ kfree(buf);
+ if (firmware)
+ release_firmware(firmware);
+ return ret;
+
+#undef APB_WRITE
+#undef APB_READ
+#undef REG_WRITE
+#undef REG_READ
+}
+
+
+static int config_reg_read(struct cw1200_common *priv, u32 *val)
+{
+ switch (priv->hw_type) {
+ case HIF_9000_SILICON_VERSATILE: {
+ u16 val16;
+ int ret = cw1200_reg_read_16(priv,
+ ST90TDS_CONFIG_REG_ID,
+ &val16);
+ if (ret < 0)
+ return ret;
+ *val = val16;
+ return 0;
+ }
+ case HIF_8601_VERSATILE:
+ case HIF_8601_SILICON:
+ default:
+ cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, val);
+ break;
+ }
+ return 0;
+}
+
+static int config_reg_write(struct cw1200_common *priv, u32 val)
+{
+ switch (priv->hw_type) {
+ case HIF_9000_SILICON_VERSATILE:
+ return cw1200_reg_write_16(priv,
+ ST90TDS_CONFIG_REG_ID,
+ (u16)val);
+ case HIF_8601_VERSATILE:
+ case HIF_8601_SILICON:
+ default:
+ return cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val);
+ break;
+ }
+ return 0;
+}
+
+int cw1200_load_firmware(struct cw1200_common *priv)
+{
+ int ret;
+ int i;
+ u32 val32;
+ u16 val16;
+ int major_revision = -1;
+
+ /* Read CONFIG Register */
+ ret = cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ pr_err("Can't read config register.\n");
+ goto out;
+ }
+
+ if (val32 == 0 || val32 == 0xffffffff) {
+ pr_err("Bad config register value (0x%08x)\n", val32);
+ ret = -EIO;
+ goto out;
+ }
+
+ priv->hw_type = cw1200_get_hw_type(val32, &major_revision);
+ if (priv->hw_type < 0) {
+ pr_err("Can't deduce hardware type.\n");
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ /* Set DPLL Reg value, and read back to confirm writes work */
+ ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
+ cw1200_dpll_from_clk(priv->hw_refclk));
+ if (ret < 0) {
+ pr_err("Can't write DPLL register.\n");
+ goto out;
+ }
+
+ msleep(20);
+
+ ret = cw1200_reg_read_32(priv,
+ ST90TDS_TSET_GEN_R_W_REG_ID, &val32);
+ if (ret < 0) {
+ pr_err("Can't read DPLL register.\n");
+ goto out;
+ }
+
+ if (val32 != cw1200_dpll_from_clk(priv->hw_refclk)) {
+ pr_err("Unable to initialise DPLL register. Wrote 0x%.8X, Read 0x%.8X.\n",
+ cw1200_dpll_from_clk(priv->hw_refclk), val32);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Set wakeup bit in device */
+ ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, &val16);
+ if (ret < 0) {
+ pr_err("set_wakeup: can't read control register.\n");
+ goto out;
+ }
+
+ ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
+ val16 | ST90TDS_CONT_WUP_BIT);
+ if (ret < 0) {
+ pr_err("set_wakeup: can't write control register.\n");
+ goto out;
+ }
+
+ /* Wait for wakeup */
+ for (i = 0; i < 300; i += (1 + i / 2)) {
+ ret = cw1200_reg_read_16(priv,
+ ST90TDS_CONTROL_REG_ID, &val16);
+ if (ret < 0) {
+ pr_err("wait_for_wakeup: can't read control register.\n");
+ goto out;
+ }
+
+ if (val16 & ST90TDS_CONT_RDY_BIT)
+ break;
+
+ msleep(i);
+ }
+
+ if ((val16 & ST90TDS_CONT_RDY_BIT) == 0) {
+ pr_err("wait_for_wakeup: device is not responding.\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ switch (major_revision) {
+ case 1:
+ /* CW1200 Hardware detection logic : Check for CUT1.1 */
+ ret = cw1200_ahb_read_32(priv, CW1200_CUT_ID_ADDR, &val32);
+ if (ret) {
+ pr_err("HW detection: can't read CUT ID.\n");
+ goto out;
+ }
+
+ switch (val32) {
+ case CW1200_CUT_11_ID_STR:
+ pr_info("CW1x00 Cut 1.1 silicon detected.\n");
+ priv->hw_revision = CW1200_HW_REV_CUT11;
+ break;
+ default:
+ pr_info("CW1x00 Cut 1.0 silicon detected.\n");
+ priv->hw_revision = CW1200_HW_REV_CUT10;
+ break;
+ }
+
+ /* According to ST-E, CUT<2.0 has busted BA TID0-3.
+ Just disable it entirely...
+ */
+ priv->ba_rx_tid_mask = 0;
+ priv->ba_tx_tid_mask = 0;
+ break;
+ case 2: {
+ u32 ar1, ar2, ar3;
+ ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR, &ar1);
+ if (ret) {
+ pr_err("(1) HW detection: can't read CUT ID\n");
+ goto out;
+ }
+ ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 4, &ar2);
+ if (ret) {
+ pr_err("(2) HW detection: can't read CUT ID.\n");
+ goto out;
+ }
+
+ ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 8, &ar3);
+ if (ret) {
+ pr_err("(3) HW detection: can't read CUT ID.\n");
+ goto out;
+ }
+
+ if (ar1 == CW1200_CUT_22_ID_STR1 &&
+ ar2 == CW1200_CUT_22_ID_STR2 &&
+ ar3 == CW1200_CUT_22_ID_STR3) {
+ pr_info("CW1x00 Cut 2.2 silicon detected.\n");
+ priv->hw_revision = CW1200_HW_REV_CUT22;
+ } else {
+ pr_info("CW1x00 Cut 2.0 silicon detected.\n");
+ priv->hw_revision = CW1200_HW_REV_CUT20;
+ }
+ break;
+ }
+ case 4:
+ pr_info("CW1x60 silicon detected.\n");
+ priv->hw_revision = CW1X60_HW_REV;
+ break;
+ default:
+ pr_err("Unsupported silicon major revision %d.\n",
+ major_revision);
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ /* Checking for access mode */
+ ret = config_reg_read(priv, &val32);
+ if (ret < 0) {
+ pr_err("Can't read config register.\n");
+ goto out;
+ }
+
+ if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) {
+ pr_err("Device is already in QUEUE mode!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (priv->hw_type) {
+ case HIF_8601_SILICON:
+ if (priv->hw_revision == CW1X60_HW_REV) {
+ pr_err("Can't handle CW1160/1260 firmware load yet.\n");
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ ret = cw1200_load_firmware_cw1200(priv);
+ break;
+ default:
+ pr_err("Can't perform firmware load for hw type %d.\n",
+ priv->hw_type);
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ if (ret < 0) {
+ pr_err("Firmware load error.\n");
+ goto out;
+ }
+
+ /* Enable interrupt signalling */
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+ ret = __cw1200_irq_enable(priv, 1);
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+ if (ret < 0)
+ goto unsubscribe;
+
+ /* Configure device for MESSAGE MODE */
+ ret = config_reg_read(priv, &val32);
+ if (ret < 0) {
+ pr_err("Can't read config register.\n");
+ goto unsubscribe;
+ }
+ ret = config_reg_write(priv, val32 & ~ST90TDS_CONFIG_ACCESS_MODE_BIT);
+ if (ret < 0) {
+ pr_err("Can't write config register.\n");
+ goto unsubscribe;
+ }
+
+ /* Unless we read the CONFIG register we will
+ * not get an interrupt.
+ */
+ mdelay(10);
+ config_reg_read(priv, &val32);
+
+out:
+ return ret;
+
+unsubscribe:
+ /* Disable interrupt signalling */
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+ ret = __cw1200_irq_enable(priv, 0);
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+ return ret;
+}
diff --git a/drivers/net/wireless/cw1200/fwio.h b/drivers/net/wireless/cw1200/fwio.h
new file mode 100644
index 000000000000..ea3099362cdf
--- /dev/null
+++ b/drivers/net/wireless/cw1200/fwio.h
@@ -0,0 +1,39 @@
+/*
+ * Firmware API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FWIO_H_INCLUDED
+#define FWIO_H_INCLUDED
+
+#define BOOTLOADER_CW1X60 "boot_cw1x60.bin"
+#define FIRMWARE_CW1X60 "wsm_cw1x60.bin"
+#define FIRMWARE_CUT22 "wsm_22.bin"
+#define FIRMWARE_CUT20 "wsm_20.bin"
+#define FIRMWARE_CUT11 "wsm_11.bin"
+#define FIRMWARE_CUT10 "wsm_10.bin"
+#define SDD_FILE_CW1X60 "sdd_cw1x60.bin"
+#define SDD_FILE_22 "sdd_22.bin"
+#define SDD_FILE_20 "sdd_20.bin"
+#define SDD_FILE_11 "sdd_11.bin"
+#define SDD_FILE_10 "sdd_10.bin"
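+
+/* The firmware and SDD images are selected to match the silicon revision
+ * detected during firmware load (e.g. wsm_22.bin and sdd_22.bin for
+ * Cut 2.2 parts); CW1x60 parts also have a dedicated bootloader image.
+ */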
+
+int cw1200_load_firmware(struct cw1200_common *priv);
+
+/* SDD definitions */
+#define SDD_PTA_CFG_ELT_ID 0xEB
+#define SDD_REFERENCE_FREQUENCY_ELT_ID 0xc5
+u32 cw1200_dpll_from_clk(u16 clk);
+
+#endif
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h
new file mode 100644
index 000000000000..8b2fc831c3de
--- /dev/null
+++ b/drivers/net/wireless/cw1200/hwbus.h
@@ -0,0 +1,33 @@
+/*
+ * Common hwbus abstraction layer interface for cw1200 wireless driver
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_HWBUS_H
+#define CW1200_HWBUS_H
+
+struct hwbus_priv;
+
+void cw1200_irq_handler(struct cw1200_common *priv);
+
+/* This MUST be wrapped with hwbus_ops->lock/unlock! */
+int __cw1200_irq_enable(struct cw1200_common *priv, int enable);
+
+struct hwbus_ops {
+ int (*hwbus_memcpy_fromio)(struct hwbus_priv *self, unsigned int addr,
+ void *dst, int count);
+ int (*hwbus_memcpy_toio)(struct hwbus_priv *self, unsigned int addr,
+ const void *src, int count);
+ void (*lock)(struct hwbus_priv *self);
+ void (*unlock)(struct hwbus_priv *self);
+ size_t (*align_size)(struct hwbus_priv *self, size_t size);
+ int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
+};
+
+#endif /* CW1200_HWBUS_H */
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c
new file mode 100644
index 000000000000..ff230b7aeedd
--- /dev/null
+++ b/drivers/net/wireless/cw1200/hwio.c
@@ -0,0 +1,312 @@
+/*
+ * Low-level device IO routines for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver, which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+
+#include "cw1200.h"
+#include "hwio.h"
+#include "hwbus.h"
+
+ /* Sdio addr is 4*spi_addr */
+#define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2)
+#define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \
+ ((((buf_id) & 0x1F) << 7) \
+ | (((mpf) & 1) << 6) \
+ | (((rfu) & 1) << 5) \
+ | (((reg_id_ofs) & 0x1F) << 0))
+#define MAX_RETRY 3
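+
+/* Example: the CONTROL register (SPI address 0x0001) maps to SDIO address
+ * 0x0004; with buffer id 2 the composed 17-bit address is
+ * SDIO_ADDR17BIT(2, 0, 0, 0x04) = (2 << 7) | 0x04 = 0x104.
+ */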
+
+
+static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr,
+ void *buf, size_t buf_len, int buf_id)
+{
+ u16 addr_sdio;
+ u32 sdio_reg_addr_17bit;
+
+ /* Check if buffer is aligned to 4 byte boundary */
+ if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) {
+ pr_err("buffer is not aligned.\n");
+ return -EINVAL;
+ }
+
+ /* Convert to SDIO Register Address */
+ addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
+ sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
+
+ return priv->hwbus_ops->hwbus_memcpy_fromio(priv->hwbus_priv,
+ sdio_reg_addr_17bit,
+ buf, buf_len);
+}
+
+static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr,
+ const void *buf, size_t buf_len, int buf_id)
+{
+ u16 addr_sdio;
+ u32 sdio_reg_addr_17bit;
+
+ /* Convert to SDIO Register Address */
+ addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
+ sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
+
+ return priv->hwbus_ops->hwbus_memcpy_toio(priv->hwbus_priv,
+ sdio_reg_addr_17bit,
+ buf, buf_len);
+}
+
+static inline int __cw1200_reg_read_32(struct cw1200_common *priv,
+ u16 addr, u32 *val)
+{
+ __le32 tmp;
+ int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
+ *val = le32_to_cpu(tmp);
+ return i;
+}
+
+static inline int __cw1200_reg_write_32(struct cw1200_common *priv,
+ u16 addr, u32 val)
+{
+ __le32 tmp = cpu_to_le32(val);
+ return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
+}
+
+static inline int __cw1200_reg_read_16(struct cw1200_common *priv,
+ u16 addr, u16 *val)
+{
+ __le16 tmp;
+ int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
+ *val = le16_to_cpu(tmp);
+ return i;
+}
+
+static inline int __cw1200_reg_write_16(struct cw1200_common *priv,
+ u16 addr, u16 val)
+{
+ __le16 tmp = cpu_to_le16(val);
+ return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
+}
+
+int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf,
+ size_t buf_len)
+{
+ int ret;
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+ ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0);
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+ return ret;
+}
+
+int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf,
+ size_t buf_len)
+{
+ int ret;
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+ ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0);
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+ return ret;
+}
+
+int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len)
+{
+ int ret, retry = 1;
+ int buf_id_rx = priv->buf_id_rx;
+
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+
+ while (retry <= MAX_RETRY) {
+ ret = __cw1200_reg_read(priv,
+ ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
+ buf_len, buf_id_rx + 1);
+ if (!ret) {
+ buf_id_rx = (buf_id_rx + 1) & 3;
+ priv->buf_id_rx = buf_id_rx;
+ break;
+ } else {
+ retry++;
+ mdelay(1);
+ pr_err("error :[%d]\n", ret);
+ }
+ }
+
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+ return ret;
+}
+
+int cw1200_data_write(struct cw1200_common *priv, const void *buf,
+ size_t buf_len)
+{
+ int ret, retry = 1;
+ int buf_id_tx = priv->buf_id_tx;
+
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+
+ while (retry <= MAX_RETRY) {
+ ret = __cw1200_reg_write(priv,
+ ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
+ buf_len, buf_id_tx);
+ if (!ret) {
+ buf_id_tx = (buf_id_tx + 1) & 31;
+ priv->buf_id_tx = buf_id_tx;
+ break;
+ } else {
+ retry++;
+ mdelay(1);
+ pr_err("error :[%d]\n", ret);
+ }
+ }
+
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+ return ret;
+}
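+
+/* Both queue-mode data paths retry a failed transfer up to MAX_RETRY
+ * times. On success the RX buffer id advances and wraps modulo 4, while
+ * the TX buffer id advances and wraps modulo 32.
+ */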
+
+int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
+ size_t buf_len, u32 prefetch, u16 port_addr)
+{
+ u32 val32 = 0;
+ int i, ret;
+
+ if ((buf_len / 2) >= 0x1000) {
+ pr_err("Can't read more than 0xfff words.\n");
+ return -EINVAL;
+ }
+
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+ /* Write address */
+ ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
+ if (ret < 0) {
+ pr_err("Can't write address register.\n");
+ goto out;
+ }
+
+ /* Read CONFIG Register Value - We will read 32 bits */
+ ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ pr_err("Can't read config register.\n");
+ goto out;
+ }
+
+ /* Set PREFETCH bit */
+ ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID,
+ val32 | prefetch);
+ if (ret < 0) {
+ pr_err("Can't write prefetch bit.\n");
+ goto out;
+ }
+
+ /* Check for PRE-FETCH bit to be cleared */
+ for (i = 0; i < 20; i++) {
+ ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ pr_err("Can't check prefetch bit.\n");
+ goto out;
+ }
+ if (!(val32 & prefetch))
+ break;
+
+ mdelay(i);
+ }
+
+ if (val32 & prefetch) {
+ pr_err("Prefetch bit is not cleared.\n");
+ goto out;
+ }
+
+ /* Read data port */
+ ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0);
+ if (ret < 0) {
+ pr_err("Can't read data port.\n");
+ goto out;
+ }
+
+out:
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+ return ret;
+}
+
+int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
+ size_t buf_len)
+{
+ int ret;
+
+ if ((buf_len / 2) >= 0x1000) {
+ pr_err("Can't write more than 0xfff words.\n");
+ return -EINVAL;
+ }
+
+ priv->hwbus_ops->lock(priv->hwbus_priv);
+
+ /* Write address */
+ ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
+ if (ret < 0) {
+ pr_err("Can't write address register.\n");
+ goto out;
+ }
+
+ /* Write data port */
+ ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID,
+ buf, buf_len, 0);
+ if (ret < 0) {
+ pr_err("Can't write data port.\n");
+ goto out;
+ }
+
+out:
+ priv->hwbus_ops->unlock(priv->hwbus_priv);
+ return ret;
+}
+
+int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
+{
+ u32 val32;
+ u16 val16;
+ int ret;
+
+ if (HIF_8601_SILICON == priv->hw_type) {
+ ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ pr_err("Can't read config register.\n");
+ return ret;
+ }
+
+ if (enable)
+ val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE;
+ else
+ val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE;
+
+ ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32);
+ if (ret < 0) {
+ pr_err("Can't write config register.\n");
+ return ret;
+ }
+ } else {
+ ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16);
+ if (ret < 0) {
+ pr_err("Can't read control register.\n");
+ return ret;
+ }
+
+ if (enable)
+ val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE;
+ else
+ val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE;
+
+ ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16);
+ if (ret < 0) {
+ pr_err("Can't write control register.\n");
+ return ret;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/cw1200/hwio.h b/drivers/net/wireless/cw1200/hwio.h
new file mode 100644
index 000000000000..ddf52669dc5b
--- /dev/null
+++ b/drivers/net/wireless/cw1200/hwio.h
@@ -0,0 +1,247 @@
+/*
+ * Low-level API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_HWIO_H_INCLUDED
+#define CW1200_HWIO_H_INCLUDED
+
+/* extern */ struct cw1200_common;
+
+#define CW1200_CUT_11_ID_STR (0x302E3830)
+#define CW1200_CUT_22_ID_STR1 (0x302e3132)
+#define CW1200_CUT_22_ID_STR2 (0x32302e30)
+#define CW1200_CUT_22_ID_STR3 (0x3335)
+#define CW1200_CUT_ID_ADDR (0xFFF17F90)
+#define CW1200_CUT2_ID_ADDR (0xFFF1FF90)
+
+/* Download control area */
+/* boot loader start address in SRAM */
+#define DOWNLOAD_BOOT_LOADER_OFFSET (0x00000000)
+/* 32K, 0x4000 to 0xDFFF */
+#define DOWNLOAD_FIFO_OFFSET (0x00004000)
+/* 32K */
+#define DOWNLOAD_FIFO_SIZE (0x00008000)
+/* 128 bytes, 0xFF80 to 0xFFFF */
+#define DOWNLOAD_CTRL_OFFSET (0x0000FF80)
+#define DOWNLOAD_CTRL_DATA_DWORDS (32-6)
+
+struct download_cntl_t {
+ /* size of whole firmware file (including Checksum), host init */
+ u32 image_size;
+ /* downloading flags */
+ u32 flags;
+ /* No. of bytes put into the download, init & updated by host */
+ u32 put;
+ /* last traced program counter, last ARM reg_pc */
+ u32 trace_pc;
+ /* No. of bytes read from the download, host init, device updates */
+ u32 get;
+ /* r0, boot loader status, host init to pending, device updates */
+ u32 status;
+ /* Extra debug info, r1 to r14 if status=r0=DOWNLOAD_EXCEPTION */
+ u32 debug_data[DOWNLOAD_CTRL_DATA_DWORDS];
+};
+
+#define DOWNLOAD_IMAGE_SIZE_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, image_size))
+#define DOWNLOAD_FLAGS_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, flags))
+#define DOWNLOAD_PUT_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, put))
+#define DOWNLOAD_TRACE_PC_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, trace_pc))
+#define DOWNLOAD_GET_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, get))
+#define DOWNLOAD_STATUS_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, status))
+#define DOWNLOAD_DEBUG_DATA_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, debug_data))
+#define DOWNLOAD_DEBUG_DATA_LEN (108)
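+
+/* With DOWNLOAD_CTRL_OFFSET at 0xFF80 the control words land at fixed
+ * addresses: image_size 0xFF80, flags 0xFF84, put 0xFF88, trace_pc
+ * 0xFF8C, get 0xFF90, status 0xFF94 and debug_data from 0xFF98, filling
+ * the area up to 0xFFFF.
+ */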
+
+#define DOWNLOAD_BLOCK_SIZE (1024)
+
+/* For boot loader detection */
+#define DOWNLOAD_ARE_YOU_HERE (0x87654321)
+#define DOWNLOAD_I_AM_HERE (0x12345678)
+
+/* Download error code */
+#define DOWNLOAD_PENDING (0xFFFFFFFF)
+#define DOWNLOAD_SUCCESS (0)
+#define DOWNLOAD_EXCEPTION (1)
+#define DOWNLOAD_ERR_MEM_1 (2)
+#define DOWNLOAD_ERR_MEM_2 (3)
+#define DOWNLOAD_ERR_SOFTWARE (4)
+#define DOWNLOAD_ERR_FILE_SIZE (5)
+#define DOWNLOAD_ERR_CHECKSUM (6)
+#define DOWNLOAD_ERR_OVERFLOW (7)
+#define DOWNLOAD_ERR_IMAGE (8)
+#define DOWNLOAD_ERR_HOST (9)
+#define DOWNLOAD_ERR_ABORT (10)
+
+
+#define SYS_BASE_ADDR_SILICON (0)
+#define PAC_BASE_ADDRESS_SILICON (SYS_BASE_ADDR_SILICON + 0x09000000)
+#define PAC_SHARED_MEMORY_SILICON (PAC_BASE_ADDRESS_SILICON)
+
+#define CW1200_APB(addr) (PAC_SHARED_MEMORY_SILICON + (addr))
+
+/* Device register definitions */
+
+/* WBF - SPI Register Addresses */
+#define ST90TDS_ADDR_ID_BASE (0x0000)
+/* 16/32 bits */
+#define ST90TDS_CONFIG_REG_ID (0x0000)
+/* 16/32 bits */
+#define ST90TDS_CONTROL_REG_ID (0x0001)
+/* 16 bits, Q mode W/R */
+#define ST90TDS_IN_OUT_QUEUE_REG_ID (0x0002)
+/* 32 bits, AHB bus R/W */
+#define ST90TDS_AHB_DPORT_REG_ID (0x0003)
+/* 16/32 bits */
+#define ST90TDS_SRAM_BASE_ADDR_REG_ID (0x0004)
+/* 32 bits, APB bus R/W */
+#define ST90TDS_SRAM_DPORT_REG_ID (0x0005)
+/* 32 bits, t_settle/general */
+#define ST90TDS_TSET_GEN_R_W_REG_ID (0x0006)
+/* 16 bits, Q mode read, no length */
+#define ST90TDS_FRAME_OUT_REG_ID (0x0007)
+#define ST90TDS_ADDR_ID_MAX (ST90TDS_FRAME_OUT_REG_ID)
+
+/* WBF - Control register bit set */
+/* next o/p length, bit 11 to 0 */
+#define ST90TDS_CONT_NEXT_LEN_MASK (0x0FFF)
+#define ST90TDS_CONT_WUP_BIT (BIT(12))
+#define ST90TDS_CONT_RDY_BIT (BIT(13))
+#define ST90TDS_CONT_IRQ_ENABLE (BIT(14))
+#define ST90TDS_CONT_RDY_ENABLE (BIT(15))
+#define ST90TDS_CONT_IRQ_RDY_ENABLE (BIT(14)|BIT(15))
+
+/* SPI Config register bit set */
+#define ST90TDS_CONFIG_FRAME_BIT (BIT(2))
+#define ST90TDS_CONFIG_WORD_MODE_BITS (BIT(3)|BIT(4))
+#define ST90TDS_CONFIG_WORD_MODE_1 (BIT(3))
+#define ST90TDS_CONFIG_WORD_MODE_2 (BIT(4))
+#define ST90TDS_CONFIG_ERROR_0_BIT (BIT(5))
+#define ST90TDS_CONFIG_ERROR_1_BIT (BIT(6))
+#define ST90TDS_CONFIG_ERROR_2_BIT (BIT(7))
+/* TBD: Sure??? */
+#define ST90TDS_CONFIG_CSN_FRAME_BIT (BIT(7))
+#define ST90TDS_CONFIG_ERROR_3_BIT (BIT(8))
+#define ST90TDS_CONFIG_ERROR_4_BIT (BIT(9))
+/* QueueM */
+#define ST90TDS_CONFIG_ACCESS_MODE_BIT (BIT(10))
+/* AHB bus */
+#define ST90TDS_CONFIG_AHB_PRFETCH_BIT (BIT(11))
+#define ST90TDS_CONFIG_CPU_CLK_DIS_BIT (BIT(12))
+/* APB bus */
+#define ST90TDS_CONFIG_PRFETCH_BIT (BIT(13))
+/* cpu reset */
+#define ST90TDS_CONFIG_CPU_RESET_BIT (BIT(14))
+#define ST90TDS_CONFIG_CLEAR_INT_BIT (BIT(15))
+
+/* For CW1200 the IRQ Enable and Ready Bits are in CONFIG register */
+#define ST90TDS_CONF_IRQ_ENABLE (BIT(16))
+#define ST90TDS_CONF_RDY_ENABLE (BIT(17))
+#define ST90TDS_CONF_IRQ_RDY_ENABLE (BIT(16)|BIT(17))
+
+int cw1200_data_read(struct cw1200_common *priv,
+ void *buf, size_t buf_len);
+int cw1200_data_write(struct cw1200_common *priv,
+ const void *buf, size_t buf_len);
+
+int cw1200_reg_read(struct cw1200_common *priv, u16 addr,
+ void *buf, size_t buf_len);
+int cw1200_reg_write(struct cw1200_common *priv, u16 addr,
+ const void *buf, size_t buf_len);
+
+static inline int cw1200_reg_read_16(struct cw1200_common *priv,
+ u16 addr, u16 *val)
+{
+ __le32 tmp;
+ int i;
+ i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp));
+ *val = le32_to_cpu(tmp) & 0xfffff;
+ return i;
+}
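+
+/* 16-bit register accesses still travel over the bus as 4-byte
+ * transfers; only the low 16 bits of the word are significant.
+ */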
+
+static inline int cw1200_reg_write_16(struct cw1200_common *priv,
+ u16 addr, u16 val)
+{
+ __le32 tmp = cpu_to_le32((u32)val);
+ return cw1200_reg_write(priv, addr, &tmp, sizeof(tmp));
+}
+
+static inline int cw1200_reg_read_32(struct cw1200_common *priv,
+ u16 addr, u32 *val)
+{
+ __le32 tmp;
+ int i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp));
+ *val = le32_to_cpu(tmp);
+ return i;
+}
+
+static inline int cw1200_reg_write_32(struct cw1200_common *priv,
+ u16 addr, u32 val)
+{
+ __le32 tmp = cpu_to_le32(val);
+ return cw1200_reg_write(priv, addr, &tmp, sizeof(val));
+}
+
+int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
+ size_t buf_len, u32 prefetch, u16 port_addr);
+int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
+ size_t buf_len);
+
+static inline int cw1200_apb_read(struct cw1200_common *priv, u32 addr,
+ void *buf, size_t buf_len)
+{
+ return cw1200_indirect_read(priv, addr, buf, buf_len,
+ ST90TDS_CONFIG_PRFETCH_BIT,
+ ST90TDS_SRAM_DPORT_REG_ID);
+}
+
+static inline int cw1200_ahb_read(struct cw1200_common *priv, u32 addr,
+ void *buf, size_t buf_len)
+{
+ return cw1200_indirect_read(priv, addr, buf, buf_len,
+ ST90TDS_CONFIG_AHB_PRFETCH_BIT,
+ ST90TDS_AHB_DPORT_REG_ID);
+}
+
+static inline int cw1200_apb_read_32(struct cw1200_common *priv,
+ u32 addr, u32 *val)
+{
+ __le32 tmp;
+ int i = cw1200_apb_read(priv, addr, &tmp, sizeof(tmp));
+ *val = le32_to_cpu(tmp);
+ return i;
+}
+
+static inline int cw1200_apb_write_32(struct cw1200_common *priv,
+ u32 addr, u32 val)
+{
+ __le32 tmp = cpu_to_le32(val);
+ return cw1200_apb_write(priv, addr, &tmp, sizeof(val));
+}
+static inline int cw1200_ahb_read_32(struct cw1200_common *priv,
+ u32 addr, u32 *val)
+{
+ __le32 tmp;
+ int i = cw1200_ahb_read(priv, addr, &tmp, sizeof(tmp));
+ *val = le32_to_cpu(tmp);
+ return i;
+}
+
+#endif /* CW1200_HWIO_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
new file mode 100644
index 000000000000..3724e739cbf4
--- /dev/null
+++ b/drivers/net/wireless/cw1200/main.c
@@ -0,0 +1,605 @@
+/*
+ * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * Based on:
+ * - the islsm (softmac prism54) driver, which is:
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ * - stlc45xx driver
+ * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "cw1200.h"
+#include "txrx.h"
+#include "hwbus.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "bh.h"
+#include "sta.h"
+#include "scan.h"
+#include "debug.h"
+#include "pm.h"
+
+MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>");
+MODULE_DESCRIPTION("Softmac ST-Ericsson CW1200 common code");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("cw1200_core");
+
+/* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */
+static u8 cw1200_mac_template[ETH_ALEN] = {0x02, 0x80, 0xe1, 0x00, 0x00, 0x00};
+module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(macaddr, "Override platform_data MAC address");
+
+static char *cw1200_sdd_path;
+module_param(cw1200_sdd_path, charp, 0644);
+MODULE_PARM_DESC(cw1200_sdd_path, "Override platform_data SDD file");
+static int cw1200_refclk;
+module_param(cw1200_refclk, int, 0644);
+MODULE_PARM_DESC(cw1200_refclk, "Override platform_data reference clock");
+
+int cw1200_power_mode = wsm_power_mode_quiescent;
+module_param(cw1200_power_mode, int, 0644);
+MODULE_PARM_DESC(cw1200_power_mode, "WSM power mode. 0 == active, 1 == doze, 2 == quiescent (default)");
+
+#define RATETAB_ENT(_rate, _rateid, _flags) \
+ { \
+ .bitrate = (_rate), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+ }
+
+static struct ieee80211_rate cw1200_rates[] = {
+ RATETAB_ENT(10, 0, 0),
+ RATETAB_ENT(20, 1, 0),
+ RATETAB_ENT(55, 2, 0),
+ RATETAB_ENT(110, 3, 0),
+ RATETAB_ENT(60, 6, 0),
+ RATETAB_ENT(90, 7, 0),
+ RATETAB_ENT(120, 8, 0),
+ RATETAB_ENT(180, 9, 0),
+ RATETAB_ENT(240, 10, 0),
+ RATETAB_ENT(360, 11, 0),
+ RATETAB_ENT(480, 12, 0),
+ RATETAB_ENT(540, 13, 0),
+};
+
+static struct ieee80211_rate cw1200_mcs_rates[] = {
+ RATETAB_ENT(65, 14, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(130, 15, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(195, 16, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(260, 17, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(390, 18, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(520, 19, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(585, 20, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(650, 21, IEEE80211_TX_RC_MCS),
+};
+
+#define cw1200_a_rates (cw1200_rates + 4)
+#define cw1200_a_rates_size (ARRAY_SIZE(cw1200_rates) - 4)
+#define cw1200_g_rates (cw1200_rates + 0)
+#define cw1200_g_rates_size (ARRAY_SIZE(cw1200_rates))
+#define cw1200_n_rates (cw1200_mcs_rates)
+#define cw1200_n_rates_size (ARRAY_SIZE(cw1200_mcs_rates))
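+/* Note: the 5 GHz (a) table is the OFDM tail of cw1200_rates (the first
+ * four CCK entries are skipped, leaving 6-54 Mbps); the 2.4 GHz (g)
+ * table uses the full list and the n table holds the eight MCS rates.
+ */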
+
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
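+
+/* CHAN5G() derives the centre frequency as 5000 + 5 * channel (e.g.
+ * channel 36 -> 5180 MHz); the 2.4 GHz channels list their frequency
+ * explicitly since channel 14 (2484 MHz) breaks the 5 MHz spacing.
+ */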
+
+static struct ieee80211_channel cw1200_2ghz_chantable[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel cw1200_5ghz_chantable[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static struct ieee80211_supported_band cw1200_band_2ghz = {
+ .channels = cw1200_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(cw1200_2ghz_chantable),
+ .bitrates = cw1200_g_rates,
+ .n_bitrates = cw1200_g_rates_size,
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
+ IEEE80211_HT_CAP_MAX_AMSDU,
+ .ht_supported = 1,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+ .mcs = {
+ .rx_mask[0] = 0xFF,
+ .rx_highest = __cpu_to_le16(0x41),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+};
+
+static struct ieee80211_supported_band cw1200_band_5ghz = {
+ .channels = cw1200_5ghz_chantable,
+ .n_channels = ARRAY_SIZE(cw1200_5ghz_chantable),
+ .bitrates = cw1200_a_rates,
+ .n_bitrates = cw1200_a_rates_size,
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
+ IEEE80211_HT_CAP_MAX_AMSDU,
+ .ht_supported = 1,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+ .mcs = {
+ .rx_mask[0] = 0xFF,
+ .rx_highest = __cpu_to_le16(0x41),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+};
+
+static const unsigned long cw1200_ttl[] = {
+ 1 * HZ, /* VO */
+ 2 * HZ, /* VI */
+ 5 * HZ, /* BE */
+ 10 * HZ /* BK */
+};
+
+static const struct ieee80211_ops cw1200_ops = {
+ .start = cw1200_start,
+ .stop = cw1200_stop,
+ .add_interface = cw1200_add_interface,
+ .remove_interface = cw1200_remove_interface,
+ .change_interface = cw1200_change_interface,
+ .tx = cw1200_tx,
+ .hw_scan = cw1200_hw_scan,
+ .set_tim = cw1200_set_tim,
+ .sta_notify = cw1200_sta_notify,
+ .sta_add = cw1200_sta_add,
+ .sta_remove = cw1200_sta_remove,
+ .set_key = cw1200_set_key,
+ .set_rts_threshold = cw1200_set_rts_threshold,
+ .config = cw1200_config,
+ .bss_info_changed = cw1200_bss_info_changed,
+ .prepare_multicast = cw1200_prepare_multicast,
+ .configure_filter = cw1200_configure_filter,
+ .conf_tx = cw1200_conf_tx,
+ .get_stats = cw1200_get_stats,
+ .ampdu_action = cw1200_ampdu_action,
+ .flush = cw1200_flush,
+#ifdef CONFIG_PM
+ .suspend = cw1200_wow_suspend,
+ .resume = cw1200_wow_resume,
+#endif
+ /* Intentionally not offloaded: */
+ /*.channel_switch = cw1200_channel_switch, */
+ /*.remain_on_channel = cw1200_remain_on_channel, */
+ /*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel, */
+};
+
+static int cw1200_ba_rx_tids = -1;
+static int cw1200_ba_tx_tids = -1;
+module_param(cw1200_ba_rx_tids, int, 0644);
+module_param(cw1200_ba_tx_tids, int, 0644);
+MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs");
+MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs");
+
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support cw1200_wowlan_support = {
+ /* Support only for limited wowlan functionalities */
+ .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
+};
+#endif
+
+
+static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
+ const bool have_5ghz)
+{
+ int i, band;
+ struct ieee80211_hw *hw;
+ struct cw1200_common *priv;
+
+ hw = ieee80211_alloc_hw(sizeof(struct cw1200_common), &cw1200_ops);
+ if (!hw)
+ return NULL;
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->hw_type = -1;
+ priv->mode = NL80211_IFTYPE_UNSPECIFIED;
+ priv->rates = cw1200_rates; /* TODO: fetch from FW */
+ priv->mcs_rates = cw1200_n_rates;
+ if (cw1200_ba_rx_tids != -1)
+ priv->ba_rx_tid_mask = cw1200_ba_rx_tids;
+ else
+ priv->ba_rx_tid_mask = 0xFF; /* Enable RX BLKACK for all TIDs */
+ if (cw1200_ba_tx_tids != -1)
+ priv->ba_tx_tid_mask = cw1200_ba_tx_tids;
+ else
+ priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC;
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+#ifdef CONFIG_PM
+ hw->wiphy->wowlan = &cw1200_wowlan_support;
+#endif
+
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+
+ hw->channel_change_time = 1000; /* TODO: find actual value */
+ hw->queues = 4;
+
+ priv->rts_threshold = -1;
+
+ hw->max_rates = 8;
+ hw->max_rate_tries = 15;
+ hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM +
+ 8; /* TKIP IV */
+
+ hw->sta_data_size = sizeof(struct cw1200_sta_priv);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
+ if (have_5ghz)
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
+
+ /* Channel params have to be cleared before registering wiphy again */
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+ for (i = 0; i < sband->n_channels; i++) {
+ sband->channels[i].flags = 0;
+ sband->channels[i].max_antenna_gain = 0;
+ sband->channels[i].max_power = 30;
+ }
+ }
+
+ hw->wiphy->max_scan_ssids = 2;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+
+ if (macaddr)
+ SET_IEEE80211_PERM_ADDR(hw, (u8 *)macaddr);
+ else
+ SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template);
+
+ /* Fix up mac address if necessary */
+ if (hw->wiphy->perm_addr[3] == 0 &&
+ hw->wiphy->perm_addr[4] == 0 &&
+ hw->wiphy->perm_addr[5] == 0) {
+ get_random_bytes(&hw->wiphy->perm_addr[3], 3);
+ }
+
+ mutex_init(&priv->wsm_cmd_mux);
+ mutex_init(&priv->conf_mutex);
+ priv->workqueue = create_singlethread_workqueue("cw1200_wq");
+ sema_init(&priv->scan.lock, 1);
+ INIT_WORK(&priv->scan.work, cw1200_scan_work);
+ INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
+ INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout);
+ INIT_DELAYED_WORK(&priv->clear_recent_scan_work,
+ cw1200_clear_recent_scan_work);
+ INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout);
+ INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work);
+ INIT_WORK(&priv->join_complete_work, cw1200_join_complete_work);
+ INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work);
+ INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work);
+ spin_lock_init(&priv->event_queue_lock);
+ INIT_LIST_HEAD(&priv->event_queue);
+ INIT_WORK(&priv->event_handler, cw1200_event_handler);
+ INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work);
+ INIT_WORK(&priv->bss_params_work, cw1200_bss_params_work);
+ spin_lock_init(&priv->bss_loss_lock);
+ spin_lock_init(&priv->ps_state_lock);
+ INIT_WORK(&priv->set_cts_work, cw1200_set_cts_work);
+ INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work);
+ INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work);
+ INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work);
+ INIT_WORK(&priv->link_id_work, cw1200_link_id_work);
+ INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work);
+ INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset);
+ INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work);
+ INIT_WORK(&priv->set_beacon_wakeup_period_work,
+ cw1200_set_beacon_wakeup_period_work);
+ init_timer(&priv->mcast_timeout);
+ priv->mcast_timeout.data = (unsigned long)priv;
+ priv->mcast_timeout.function = cw1200_mcast_timeout;
+
+ if (cw1200_queue_stats_init(&priv->tx_queue_stats,
+ CW1200_LINK_ID_MAX,
+ cw1200_skb_dtor,
+ priv)) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ for (i = 0; i < 4; ++i) {
+ if (cw1200_queue_init(&priv->tx_queue[i],
+ &priv->tx_queue_stats, i, 16,
+ cw1200_ttl[i])) {
+ for (; i > 0; i--)
+ cw1200_queue_deinit(&priv->tx_queue[i - 1]);
+ cw1200_queue_stats_deinit(&priv->tx_queue_stats);
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+ }
+
+ init_waitqueue_head(&priv->channel_switch_done);
+ init_waitqueue_head(&priv->wsm_cmd_wq);
+ init_waitqueue_head(&priv->wsm_startup_done);
+ init_waitqueue_head(&priv->ps_mode_switch_done);
+ wsm_buf_init(&priv->wsm_cmd_buf);
+ spin_lock_init(&priv->wsm_cmd.lock);
+ priv->wsm_cmd.done = 1;
+ tx_policy_init(priv);
+
+ return hw;
+}
+
+static int cw1200_register_common(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ int err;
+
+#ifdef CONFIG_PM
+ err = cw1200_pm_init(&priv->pm_state, priv);
+ if (err) {
+ pr_err("Cannot init PM. (%d).\n",
+ err);
+ return err;
+ }
+#endif
+
+ err = ieee80211_register_hw(dev);
+ if (err) {
+ pr_err("Cannot register device (%d).\n",
+ err);
+#ifdef CONFIG_PM
+ cw1200_pm_deinit(&priv->pm_state);
+#endif
+ return err;
+ }
+
+ cw1200_debug_init(priv);
+
+ pr_info("Registered as '%s'\n", wiphy_name(dev->wiphy));
+ return 0;
+}
+
+static void cw1200_free_common(struct ieee80211_hw *dev)
+{
+ ieee80211_free_hw(dev);
+}
+
+static void cw1200_unregister_common(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ int i;
+
+ ieee80211_unregister_hw(dev);
+
+ del_timer_sync(&priv->mcast_timeout);
+ cw1200_unregister_bh(priv);
+
+ cw1200_debug_release(priv);
+
+ mutex_destroy(&priv->conf_mutex);
+
+ wsm_buf_deinit(&priv->wsm_cmd_buf);
+
+ destroy_workqueue(priv->workqueue);
+ priv->workqueue = NULL;
+
+ if (priv->sdd) {
+ release_firmware(priv->sdd);
+ priv->sdd = NULL;
+ }
+
+ for (i = 0; i < 4; ++i)
+ cw1200_queue_deinit(&priv->tx_queue[i]);
+
+ cw1200_queue_stats_deinit(&priv->tx_queue_stats);
+#ifdef CONFIG_PM
+ cw1200_pm_deinit(&priv->pm_state);
+#endif
+}
+
+/* Clock is in KHz */
+u32 cw1200_dpll_from_clk(u16 clk_khz)
+{
+ switch (clk_khz) {
+ case 0x32C8: /* 13000 KHz */
+ return 0x1D89D241;
+ case 0x3E80: /* 16000 KHz */
+ return 0x000001E1;
+ case 0x41A0: /* 16800 KHz */
+ return 0x124931C1;
+ case 0x4B00: /* 19200 KHz */
+ return 0x00000191;
+ case 0x5DC0: /* 24000 KHz */
+ return 0x00000141;
+ case 0x6590: /* 26000 KHz */
+ return 0x0EC4F121;
+ case 0x8340: /* 33600 KHz */
+ return 0x092490E1;
+ case 0x9600: /* 38400 KHz */
+ return 0x100010C1;
+ case 0x9C40: /* 40000 KHz */
+ return 0x000000C1;
+ case 0xBB80: /* 48000 KHz */
+ return 0x000000A1;
+ case 0xCB20: /* 52000 KHz */
+ return 0x07627091;
+ default:
+ pr_err("Unknown Refclk freq (0x%04x), using 2600KHz\n",
+ clk_khz);
+ return 0x0EC4F121;
+ }
+}
+
+int cw1200_core_probe(const struct hwbus_ops *hwbus_ops,
+ struct hwbus_priv *hwbus,
+ struct device *pdev,
+ struct cw1200_common **core,
+ int ref_clk, const u8 *macaddr,
+ const char *sdd_path, bool have_5ghz)
+{
+ int err = -EINVAL;
+ struct ieee80211_hw *dev;
+ struct cw1200_common *priv;
+ struct wsm_operational_mode mode = {
+ .power_mode = cw1200_power_mode,
+ .disable_more_flag_usage = true,
+ };
+
+ dev = cw1200_init_common(macaddr, have_5ghz);
+ if (!dev)
+ goto err;
+
+ priv = dev->priv;
+ priv->hw_refclk = ref_clk;
+ if (cw1200_refclk)
+ priv->hw_refclk = cw1200_refclk;
+
+ priv->sdd_path = (char *)sdd_path;
+ if (cw1200_sdd_path)
+ priv->sdd_path = cw1200_sdd_path;
+
+ priv->hwbus_ops = hwbus_ops;
+ priv->hwbus_priv = hwbus;
+ priv->pdev = pdev;
+ SET_IEEE80211_DEV(priv->hw, pdev);
+
+ /* Pass struct cw1200_common back up */
+ *core = priv;
+
+ err = cw1200_register_bh(priv);
+ if (err)
+ goto err1;
+
+ err = cw1200_load_firmware(priv);
+ if (err)
+ goto err2;
+
+ if (wait_event_interruptible_timeout(priv->wsm_startup_done,
+ priv->firmware_ready,
+ 3*HZ) <= 0) {
+ /* TODO: Need to find how to reset device
+ in QUEUE mode properly.
+ */
+ pr_err("Timeout waiting on device startup\n");
+ err = -ETIMEDOUT;
+ goto err2;
+ }
+
+ /* Set low-power mode. */
+ wsm_set_operational_mode(priv, &mode);
+
+ /* Enable multi-TX confirmation */
+ wsm_use_multi_tx_conf(priv, true);
+
+ err = cw1200_register_common(dev);
+ if (err)
+ goto err2;
+
+ return err;
+
+err2:
+ cw1200_unregister_bh(priv);
+err1:
+ cw1200_free_common(dev);
+err:
+ *core = NULL;
+ return err;
+}
+EXPORT_SYMBOL_GPL(cw1200_core_probe);
+
+void cw1200_core_release(struct cw1200_common *self)
+{
+ /* Disable device interrupts */
+ self->hwbus_ops->lock(self->hwbus_priv);
+ __cw1200_irq_enable(self, 0);
+ self->hwbus_ops->unlock(self->hwbus_priv);
+
+ /* And then clean up */
+ cw1200_unregister_common(self->hw);
+ cw1200_free_common(self->hw);
+ return;
+}
+EXPORT_SYMBOL_GPL(cw1200_core_release);
diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c
new file mode 100644
index 000000000000..b37abb9f0453
--- /dev/null
+++ b/drivers/net/wireless/cw1200/pm.c
@@ -0,0 +1,367 @@
+/*
+ * Mac80211 power management API for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/if_ether.h>
+#include "cw1200.h"
+#include "pm.h"
+#include "sta.h"
+#include "bh.h"
+#include "hwbus.h"
+
+#define CW1200_BEACON_SKIPPING_MULTIPLIER 3
+
+struct cw1200_udp_port_filter {
+ struct wsm_udp_port_filter_hdr hdr;
+ /* Up to 4 filters are allowed. */
+ struct wsm_udp_port_filter filters[WSM_MAX_FILTER_ELEMENTS];
+} __packed;
+
+struct cw1200_ether_type_filter {
+ struct wsm_ether_type_filter_hdr hdr;
+ /* Up to 4 filters are allowed. */
+ struct wsm_ether_type_filter filters[WSM_MAX_FILTER_ELEMENTS];
+} __packed;
+
+static struct cw1200_udp_port_filter cw1200_udp_port_filter_on = {
+ .hdr.num = 2,
+ .filters = {
+ [0] = {
+ .action = WSM_FILTER_ACTION_FILTER_OUT,
+ .type = WSM_FILTER_PORT_TYPE_DST,
+ .port = __cpu_to_le16(67), /* DHCP Bootps */
+ },
+ [1] = {
+ .action = WSM_FILTER_ACTION_FILTER_OUT,
+ .type = WSM_FILTER_PORT_TYPE_DST,
+ .port = __cpu_to_le16(68), /* DHCP Bootpc */
+ },
+ }
+};
+
+static struct wsm_udp_port_filter_hdr cw1200_udp_port_filter_off = {
+ .num = 0,
+};
+
+#ifndef ETH_P_WAPI
+#define ETH_P_WAPI 0x88B4
+#endif
+
+static struct cw1200_ether_type_filter cw1200_ether_type_filter_on = {
+ .hdr.num = 4,
+ .filters = {
+ [0] = {
+ .action = WSM_FILTER_ACTION_FILTER_IN,
+ .type = __cpu_to_le16(ETH_P_IP),
+ },
+ [1] = {
+ .action = WSM_FILTER_ACTION_FILTER_IN,
+ .type = __cpu_to_le16(ETH_P_PAE),
+ },
+ [2] = {
+ .action = WSM_FILTER_ACTION_FILTER_IN,
+ .type = __cpu_to_le16(ETH_P_WAPI),
+ },
+ [3] = {
+ .action = WSM_FILTER_ACTION_FILTER_IN,
+ .type = __cpu_to_le16(ETH_P_ARP),
+ },
+ },
+};
+
+static struct wsm_ether_type_filter_hdr cw1200_ether_type_filter_off = {
+ .num = 0,
+};
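+
+/* While the host is suspended, UDP frames whose destination port is 67
+ * or 68 (DHCP) are filtered out and only IP, EAPOL (PAE), WAPI and ARP
+ * ethertypes are passed in; both filter sets are cleared on resume.
+ */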
+
+/* private */
+struct cw1200_suspend_state {
+ unsigned long bss_loss_tmo;
+ unsigned long join_tmo;
+ unsigned long direct_probe;
+ unsigned long link_id_gc;
+ bool beacon_skipping;
+ u8 prev_ps_mode;
+};
+
+static void cw1200_pm_stay_awake_tmo(unsigned long arg)
+{
+ /* XXX what's the point of this ? */
+}
+
+int cw1200_pm_init(struct cw1200_pm_state *pm,
+ struct cw1200_common *priv)
+{
+ spin_lock_init(&pm->lock);
+
+ init_timer(&pm->stay_awake);
+ pm->stay_awake.data = (unsigned long)pm;
+ pm->stay_awake.function = cw1200_pm_stay_awake_tmo;
+
+ return 0;
+}
+
+void cw1200_pm_deinit(struct cw1200_pm_state *pm)
+{
+ del_timer_sync(&pm->stay_awake);
+}
+
+void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+ unsigned long tmo)
+{
+ long cur_tmo;
+ spin_lock_bh(&pm->lock);
+ cur_tmo = pm->stay_awake.expires - jiffies;
+ if (!timer_pending(&pm->stay_awake) || cur_tmo < (long)tmo)
+ mod_timer(&pm->stay_awake, jiffies + tmo);
+ spin_unlock_bh(&pm->lock);
+}
+
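+/* Helpers for suspending delayed work: cw1200_suspend_work() cancels the
+ * work and returns the jiffies remaining on its timer (-1 if it was not
+ * pending); cw1200_resume_work() re-arms it with that remaining timeout.
+ */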
+static long cw1200_suspend_work(struct delayed_work *work)
+{
+ int ret = cancel_delayed_work(work);
+ long tmo;
+ if (ret > 0) {
+ /* Timer is pending */
+ tmo = work->timer.expires - jiffies;
+ if (tmo < 0)
+ tmo = 0;
+ } else {
+ tmo = -1;
+ }
+ return tmo;
+}
+
+static int cw1200_resume_work(struct cw1200_common *priv,
+ struct delayed_work *work,
+ unsigned long tmo)
+{
+ if ((long)tmo < 0)
+ return 1;
+
+ return queue_delayed_work(priv->workqueue, work, tmo);
+}
+
+int cw1200_can_suspend(struct cw1200_common *priv)
+{
+ if (atomic_read(&priv->bh_rx)) {
+ wiphy_dbg(priv->hw->wiphy, "Suspend interrupted.\n");
+ return 0;
+ }
+ return 1;
+}
+EXPORT_SYMBOL_GPL(cw1200_can_suspend);
+
+int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_pm_state *pm_state = &priv->pm_state;
+ struct cw1200_suspend_state *state;
+ int ret;
+
+ spin_lock_bh(&pm_state->lock);
+ ret = timer_pending(&pm_state->stay_awake);
+ spin_unlock_bh(&pm_state->lock);
+ if (ret)
+ return -EAGAIN;
+
+ /* Do not suspend when datapath is not idle */
+ if (priv->tx_queue_stats.num_queued)
+ return -EBUSY;
+
+ /* Make sure there are no configuration requests in progress. */
+ if (!mutex_trylock(&priv->conf_mutex))
+ return -EBUSY;
+
+ /* Ensure pending operations are done.
+ * Note also that wow_suspend must return in ~2.5sec, before
+ * watchdog is triggered.
+ */
+ if (priv->channel_switch_in_progress)
+ goto revert1;
+
+ /* Do not suspend when join is pending */
+ if (priv->join_pending)
+ goto revert1;
+
+ /* Do not suspend when scanning */
+ if (down_trylock(&priv->scan.lock))
+ goto revert1;
+
+ /* Lock TX. */
+ wsm_lock_tx_async(priv);
+
+ /* Wait to avoid possible race with bh code.
+ * But do not wait too long...
+ */
+ if (wait_event_timeout(priv->bh_evt_wq,
+ !priv->hw_bufs_used, HZ / 10) <= 0)
+ goto revert2;
+
+ /* Set UDP filter */
+ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_on.hdr);
+
+ /* Set ethernet frame type filter */
+ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_on.hdr);
+
+ /* Allocate state */
+ state = kzalloc(sizeof(struct cw1200_suspend_state), GFP_KERNEL);
+ if (!state)
+ goto revert3;
+
+ /* Change to legacy PS while going to suspend */
+ if (!priv->vif->p2p &&
+ priv->join_status == CW1200_JOIN_STATUS_STA &&
+ priv->powersave_mode.mode != WSM_PSM_PS) {
+ state->prev_ps_mode = priv->powersave_mode.mode;
+ priv->powersave_mode.mode = WSM_PSM_PS;
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ if (wait_event_interruptible_timeout(priv->ps_mode_switch_done,
+ !priv->ps_mode_switch_in_progress, 1*HZ) <= 0) {
+ goto revert3;
+ }
+ }
+
+ /* Store delayed work states. */
+ state->bss_loss_tmo =
+ cw1200_suspend_work(&priv->bss_loss_work);
+ state->join_tmo =
+ cw1200_suspend_work(&priv->join_timeout);
+ state->direct_probe =
+ cw1200_suspend_work(&priv->scan.probe_work);
+ state->link_id_gc =
+ cw1200_suspend_work(&priv->link_id_gc_work);
+
+ cancel_delayed_work_sync(&priv->clear_recent_scan_work);
+ atomic_set(&priv->recent_scan, 0);
+
+ /* Enable beacon skipping */
+ if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+ priv->join_dtim_period &&
+ !priv->has_multicast_subscription) {
+ state->beacon_skipping = true;
+ wsm_set_beacon_wakeup_period(priv,
+ priv->join_dtim_period,
+ CW1200_BEACON_SKIPPING_MULTIPLIER * priv->join_dtim_period);
+ }
+
+ /* Stop serving thread */
+ if (cw1200_bh_suspend(priv))
+ goto revert4;
+
+ ret = timer_pending(&priv->mcast_timeout);
+ if (ret)
+ goto revert5;
+
+ /* Store suspend state */
+ pm_state->suspend_state = state;
+
+ /* Enable IRQ wake */
+ ret = priv->hwbus_ops->power_mgmt(priv->hwbus_priv, true);
+ if (ret) {
+ wiphy_err(priv->hw->wiphy,
+ "PM request failed: %d. WoW is disabled.\n", ret);
+ cw1200_wow_resume(hw);
+ return -EBUSY;
+ }
+
+ /* Force resume if event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
+ return -EAGAIN;
+ }
+
+ return 0;
+
+revert5:
+ WARN_ON(cw1200_bh_resume(priv));
+revert4:
+ cw1200_resume_work(priv, &priv->bss_loss_work,
+ state->bss_loss_tmo);
+ cw1200_resume_work(priv, &priv->join_timeout,
+ state->join_tmo);
+ cw1200_resume_work(priv, &priv->scan.probe_work,
+ state->direct_probe);
+ cw1200_resume_work(priv, &priv->link_id_gc_work,
+ state->link_id_gc);
+ kfree(state);
+revert3:
+ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
+ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off);
+revert2:
+ wsm_unlock_tx(priv);
+ up(&priv->scan.lock);
+revert1:
+ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+}
+
+int cw1200_wow_resume(struct ieee80211_hw *hw)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_pm_state *pm_state = &priv->pm_state;
+ struct cw1200_suspend_state *state;
+
+ state = pm_state->suspend_state;
+ pm_state->suspend_state = NULL;
+
+ /* Disable IRQ wake */
+ priv->hwbus_ops->power_mgmt(priv->hwbus_priv, false);
+
+ /* scan.lock must be released before BH is resumed; otherwise,
+ * if a BSS_LOST command arrives while suspended, its processing
+ * will be delayed.
+ */
+ up(&priv->scan.lock);
+
+ /* Resume BH thread */
+ WARN_ON(cw1200_bh_resume(priv));
+
+ /* Restore previous PS mode */
+ if (!priv->vif->p2p && priv->join_status == CW1200_JOIN_STATUS_STA) {
+ priv->powersave_mode.mode = state->prev_ps_mode;
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ }
+
+ if (state->beacon_skipping) {
+ wsm_set_beacon_wakeup_period(priv, priv->beacon_int *
+ priv->join_dtim_period >
+ MAX_BEACON_SKIP_TIME_MS ? 1 :
+ priv->join_dtim_period, 0);
+ state->beacon_skipping = false;
+ }
+
+ /* Resume delayed work */
+ cw1200_resume_work(priv, &priv->bss_loss_work,
+ state->bss_loss_tmo);
+ cw1200_resume_work(priv, &priv->join_timeout,
+ state->join_tmo);
+ cw1200_resume_work(priv, &priv->scan.probe_work,
+ state->direct_probe);
+ cw1200_resume_work(priv, &priv->link_id_gc_work,
+ state->link_id_gc);
+
+ /* Remove UDP port filter */
+ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
+
+ /* Remove ethernet frame type filter */
+ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off);
+
+ /* Unlock datapath */
+ wsm_unlock_tx(priv);
+
+ /* Unlock configuration mutex */
+ mutex_unlock(&priv->conf_mutex);
+
+ /* Free memory */
+ kfree(state);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/cw1200/pm.h b/drivers/net/wireless/cw1200/pm.h
new file mode 100644
index 000000000000..3ed90ff22bb8
--- /dev/null
+++ b/drivers/net/wireless/cw1200/pm.h
@@ -0,0 +1,43 @@
+/*
+ * Mac80211 power management interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef PM_H_INCLUDED
+#define PM_H_INCLUDED
+
+/* ******************************************************************** */
+/* mac80211 API */
+
+/* extern */ struct cw1200_common;
+/* private */ struct cw1200_suspend_state;
+
+struct cw1200_pm_state {
+ struct cw1200_suspend_state *suspend_state;
+ struct timer_list stay_awake;
+ struct platform_device *pm_dev;
+ spinlock_t lock; /* Protect access */
+};
+
+#ifdef CONFIG_PM
+int cw1200_pm_init(struct cw1200_pm_state *pm,
+ struct cw1200_common *priv);
+void cw1200_pm_deinit(struct cw1200_pm_state *pm);
+int cw1200_wow_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int cw1200_wow_resume(struct ieee80211_hw *hw);
+int cw1200_can_suspend(struct cw1200_common *priv);
+void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+ unsigned long tmo);
+#else
+static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+ unsigned long tmo) {
+}
+#endif
+#endif
diff --git a/drivers/net/wireless/cw1200/queue.c b/drivers/net/wireless/cw1200/queue.c
new file mode 100644
index 000000000000..9c3925f58d79
--- /dev/null
+++ b/drivers/net/wireless/cw1200/queue.c
@@ -0,0 +1,583 @@
+/*
+ * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+#include <linux/sched.h>
+#include "queue.h"
+#include "cw1200.h"
+#include "debug.h"
+
+/* private */ struct cw1200_queue_item
+{
+ struct list_head head;
+ struct sk_buff *skb;
+ u32 packet_id;
+ unsigned long queue_timestamp;
+ unsigned long xmit_timestamp;
+ struct cw1200_txpriv txpriv;
+ u8 generation;
+};
+
+static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
+{
+ struct cw1200_queue_stats *stats = queue->stats;
+ if (queue->tx_locked_cnt++ == 0) {
+ pr_debug("[TX] Queue %d is locked.\n",
+ queue->queue_id);
+ ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
+ }
+}
+
+static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
+{
+ struct cw1200_queue_stats *stats = queue->stats;
+ BUG_ON(!queue->tx_locked_cnt);
+ if (--queue->tx_locked_cnt == 0) {
+ pr_debug("[TX] Queue %d is unlocked.\n",
+ queue->queue_id);
+ ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
+ }
+}
+
+static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
+ u8 *queue_id, u8 *item_generation,
+ u8 *item_id)
+{
+ *item_id = (packet_id >> 0) & 0xFF;
+ *item_generation = (packet_id >> 8) & 0xFF;
+ *queue_id = (packet_id >> 16) & 0xFF;
+ *queue_generation = (packet_id >> 24) & 0xFF;
+}
+
+static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
+ u8 item_generation, u8 item_id)
+{
+ return ((u32)item_id << 0) |
+ ((u32)item_generation << 8) |
+ ((u32)queue_id << 16) |
+ ((u32)queue_generation << 24);
+}
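+
+/* A packet id packs four 8-bit fields:
+ * [31:24] queue generation, [23:16] queue id,
+ * [15:8] item generation, [7:0] item index in the pool.
+ * E.g. generation 1, queue 2, item generation 3, item 0x10 -> 0x01020310.
+ */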
+
+static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
+ struct list_head *gc_list)
+{
+ struct cw1200_queue_item *item, *tmp;
+
+ list_for_each_entry_safe(item, tmp, gc_list, head) {
+ list_del(&item->head);
+ stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
+ kfree(item);
+ }
+}
+
+static void cw1200_queue_register_post_gc(struct list_head *gc_list,
+ struct cw1200_queue_item *item)
+{
+ struct cw1200_queue_item *gc_item;
+ gc_item = kmalloc(sizeof(struct cw1200_queue_item),
+ GFP_ATOMIC);
+ BUG_ON(!gc_item);
+ memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
+ list_add_tail(&gc_item->head, gc_list);
+}
+
+static void __cw1200_queue_gc(struct cw1200_queue *queue,
+ struct list_head *head,
+ bool unlock)
+{
+ struct cw1200_queue_stats *stats = queue->stats;
+ struct cw1200_queue_item *item = NULL, *tmp;
+ bool wakeup_stats = false;
+
+ list_for_each_entry_safe(item, tmp, &queue->queue, head) {
+ if (jiffies - item->queue_timestamp < queue->ttl)
+ break;
+ --queue->num_queued;
+ --queue->link_map_cache[item->txpriv.link_id];
+ spin_lock_bh(&stats->lock);
+ --stats->num_queued;
+ if (!--stats->link_map_cache[item->txpriv.link_id])
+ wakeup_stats = true;
+ spin_unlock_bh(&stats->lock);
+ cw1200_debug_tx_ttl(stats->priv);
+ cw1200_queue_register_post_gc(head, item);
+ item->skb = NULL;
+ list_move_tail(&item->head, &queue->free_pool);
+ }
+
+ if (wakeup_stats)
+ wake_up(&stats->wait_link_id_empty);
+
+ if (queue->overfull) {
+ if (queue->num_queued <= (queue->capacity >> 1)) {
+ queue->overfull = false;
+ if (unlock)
+ __cw1200_queue_unlock(queue);
+ } else if (item) {
+ unsigned long tmo = item->queue_timestamp + queue->ttl;
+ mod_timer(&queue->gc, tmo);
+ cw1200_pm_stay_awake(&stats->priv->pm_state,
+ tmo - jiffies);
+ }
+ }
+}
+
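+/* Timer callback: expire items that have exceeded the queue TTL. Expired
+ * entries are collected under the queue lock and their skbs are handed
+ * to the destructor outside of it via cw1200_queue_post_gc().
+ */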
+static void cw1200_queue_gc(unsigned long arg)
+{
+ LIST_HEAD(list);
+ struct cw1200_queue *queue =
+ (struct cw1200_queue *)arg;
+
+ spin_lock_bh(&queue->lock);
+ __cw1200_queue_gc(queue, &list, true);
+ spin_unlock_bh(&queue->lock);
+ cw1200_queue_post_gc(queue->stats, &list);
+}
+
+int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
+ size_t map_capacity,
+ cw1200_queue_skb_dtor_t skb_dtor,
+ struct cw1200_common *priv)
+{
+ memset(stats, 0, sizeof(*stats));
+ stats->map_capacity = map_capacity;
+ stats->skb_dtor = skb_dtor;
+ stats->priv = priv;
+ spin_lock_init(&stats->lock);
+ init_waitqueue_head(&stats->wait_link_id_empty);
+
+ stats->link_map_cache = kzalloc(sizeof(int) * map_capacity,
+ GFP_KERNEL);
+ if (!stats->link_map_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int cw1200_queue_init(struct cw1200_queue *queue,
+ struct cw1200_queue_stats *stats,
+ u8 queue_id,
+ size_t capacity,
+ unsigned long ttl)
+{
+ size_t i;
+
+ memset(queue, 0, sizeof(*queue));
+ queue->stats = stats;
+ queue->capacity = capacity;
+ queue->queue_id = queue_id;
+ queue->ttl = ttl;
+ INIT_LIST_HEAD(&queue->queue);
+ INIT_LIST_HEAD(&queue->pending);
+ INIT_LIST_HEAD(&queue->free_pool);
+ spin_lock_init(&queue->lock);
+ init_timer(&queue->gc);
+ queue->gc.data = (unsigned long)queue;
+ queue->gc.function = cw1200_queue_gc;
+
+ queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
+ GFP_KERNEL);
+ if (!queue->pool)
+ return -ENOMEM;
+
+ queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity,
+ GFP_KERNEL);
+ if (!queue->link_map_cache) {
+ kfree(queue->pool);
+ queue->pool = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < capacity; ++i)
+ list_add_tail(&queue->pool[i].head, &queue->free_pool);
+
+ return 0;
+}
+
+int cw1200_queue_clear(struct cw1200_queue *queue)
+{
+ int i;
+ LIST_HEAD(gc_list);
+ struct cw1200_queue_stats *stats = queue->stats;
+ struct cw1200_queue_item *item, *tmp;
+
+ spin_lock_bh(&queue->lock);
+ queue->generation++;
+ list_splice_tail_init(&queue->queue, &queue->pending);
+ list_for_each_entry_safe(item, tmp, &queue->pending, head) {
+ WARN_ON(!item->skb);
+ cw1200_queue_register_post_gc(&gc_list, item);
+ item->skb = NULL;
+ list_move_tail(&item->head, &queue->free_pool);
+ }
+ queue->num_queued = 0;
+ queue->num_pending = 0;
+
+ spin_lock_bh(&stats->lock);
+ for (i = 0; i < stats->map_capacity; ++i) {
+ stats->num_queued -= queue->link_map_cache[i];
+ stats->link_map_cache[i] -= queue->link_map_cache[i];
+ queue->link_map_cache[i] = 0;
+ }
+ spin_unlock_bh(&stats->lock);
+ if (queue->overfull) {
+ queue->overfull = false;
+ __cw1200_queue_unlock(queue);
+ }
+ spin_unlock_bh(&queue->lock);
+ wake_up(&stats->wait_link_id_empty);
+ cw1200_queue_post_gc(stats, &gc_list);
+ return 0;
+}
+
+void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
+{
+ kfree(stats->link_map_cache);
+ stats->link_map_cache = NULL;
+}
+
+void cw1200_queue_deinit(struct cw1200_queue *queue)
+{
+ cw1200_queue_clear(queue);
+ del_timer_sync(&queue->gc);
+ INIT_LIST_HEAD(&queue->free_pool);
+ kfree(queue->pool);
+ kfree(queue->link_map_cache);
+ queue->pool = NULL;
+ queue->link_map_cache = NULL;
+ queue->capacity = 0;
+}
+
+size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
+ u32 link_id_map)
+{
+ size_t ret;
+ int i, bit;
+ size_t map_capacity = queue->stats->map_capacity;
+
+ if (!link_id_map)
+ return 0;
+
+ spin_lock_bh(&queue->lock);
+ if (link_id_map == (u32)-1) {
+ ret = queue->num_queued - queue->num_pending;
+ } else {
+ ret = 0;
+ for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
+ if (link_id_map & bit)
+ ret += queue->link_map_cache[i];
+ }
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+int cw1200_queue_put(struct cw1200_queue *queue,
+ struct sk_buff *skb,
+ struct cw1200_txpriv *txpriv)
+{
+ int ret = 0;
+ LIST_HEAD(gc_list);
+ struct cw1200_queue_stats *stats = queue->stats;
+
+ if (txpriv->link_id >= queue->stats->map_capacity)
+ return -EINVAL;
+
+ spin_lock_bh(&queue->lock);
+ if (!WARN_ON(list_empty(&queue->free_pool))) {
+ struct cw1200_queue_item *item = list_first_entry(
+ &queue->free_pool, struct cw1200_queue_item, head);
+ BUG_ON(item->skb);
+
+ list_move_tail(&item->head, &queue->queue);
+ item->skb = skb;
+ item->txpriv = *txpriv;
+ item->generation = 0;
+ item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
+ queue->queue_id,
+ item->generation,
+ item - queue->pool);
+ item->queue_timestamp = jiffies;
+
+ ++queue->num_queued;
+ ++queue->link_map_cache[txpriv->link_id];
+
+ spin_lock_bh(&stats->lock);
+ ++stats->num_queued;
+ ++stats->link_map_cache[txpriv->link_id];
+ spin_unlock_bh(&stats->lock);
+
+ /* TX may happen in parallel sometimes.
+ * Leave extra queue slots so we don't overflow.
+ */
+ if (queue->overfull == false &&
+ queue->num_queued >=
+ (queue->capacity - (num_present_cpus() - 1))) {
+ queue->overfull = true;
+ __cw1200_queue_lock(queue);
+ mod_timer(&queue->gc, jiffies);
+ }
+ } else {
+ ret = -ENOENT;
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+int cw1200_queue_get(struct cw1200_queue *queue,
+ u32 link_id_map,
+ struct wsm_tx **tx,
+ struct ieee80211_tx_info **tx_info,
+ const struct cw1200_txpriv **txpriv)
+{
+ int ret = -ENOENT;
+ struct cw1200_queue_item *item;
+ struct cw1200_queue_stats *stats = queue->stats;
+ bool wakeup_stats = false;
+
+ spin_lock_bh(&queue->lock);
+ list_for_each_entry(item, &queue->queue, head) {
+ if (link_id_map & BIT(item->txpriv.link_id)) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (!WARN_ON(ret)) {
+ *tx = (struct wsm_tx *)item->skb->data;
+ *tx_info = IEEE80211_SKB_CB(item->skb);
+ *txpriv = &item->txpriv;
+ (*tx)->packet_id = item->packet_id;
+ list_move_tail(&item->head, &queue->pending);
+ ++queue->num_pending;
+ --queue->link_map_cache[item->txpriv.link_id];
+ item->xmit_timestamp = jiffies;
+
+ spin_lock_bh(&stats->lock);
+ --stats->num_queued;
+ if (!--stats->link_map_cache[item->txpriv.link_id])
+ wakeup_stats = true;
+ spin_unlock_bh(&stats->lock);
+ }
+ spin_unlock_bh(&queue->lock);
+ if (wakeup_stats)
+ wake_up(&stats->wait_link_id_empty);
+ return ret;
+}
+
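+/* Requeue a pending frame back onto the TX queue by packet_id.
+ * The queue/item generation fields embedded in the packet_id guard
+ * against stale ids: a mismatch means the slot has since been reused.
+ */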
+int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
+{
+ int ret = 0;
+ u8 queue_generation, queue_id, item_generation, item_id;
+ struct cw1200_queue_item *item;
+ struct cw1200_queue_stats *stats = queue->stats;
+
+ cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
+ &item_generation, &item_id);
+
+ item = &queue->pool[item_id];
+
+ spin_lock_bh(&queue->lock);
+ BUG_ON(queue_id != queue->queue_id);
+ if (queue_generation != queue->generation) {
+ ret = -ENOENT;
+ } else if (item_id >= (unsigned) queue->capacity) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ } else if (item->generation != item_generation) {
+ WARN_ON(1);
+ ret = -ENOENT;
+ } else {
+ --queue->num_pending;
+ ++queue->link_map_cache[item->txpriv.link_id];
+
+ spin_lock_bh(&stats->lock);
+ ++stats->num_queued;
+ ++stats->link_map_cache[item->txpriv.link_id];
+ spin_unlock_bh(&stats->lock);
+
+ item->generation = ++item_generation;
+ item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
+ queue_id,
+ item_generation,
+ item_id);
+ list_move(&item->head, &queue->queue);
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+int cw1200_queue_requeue_all(struct cw1200_queue *queue)
+{
+ struct cw1200_queue_item *item, *tmp;
+ struct cw1200_queue_stats *stats = queue->stats;
+ spin_lock_bh(&queue->lock);
+
+ list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
+ --queue->num_pending;
+ ++queue->link_map_cache[item->txpriv.link_id];
+
+ spin_lock_bh(&stats->lock);
+ ++stats->num_queued;
+ ++stats->link_map_cache[item->txpriv.link_id];
+ spin_unlock_bh(&stats->lock);
+
+ ++item->generation;
+ item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
+ queue->queue_id,
+ item->generation,
+ item - queue->pool);
+ list_move(&item->head, &queue->queue);
+ }
+ spin_unlock_bh(&queue->lock);
+
+ return 0;
+}
+
+int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
+{
+ int ret = 0;
+ u8 queue_generation, queue_id, item_generation, item_id;
+ struct cw1200_queue_item *item;
+ struct cw1200_queue_stats *stats = queue->stats;
+ struct sk_buff *gc_skb = NULL;
+ struct cw1200_txpriv gc_txpriv;
+
+ cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
+ &item_generation, &item_id);
+
+ item = &queue->pool[item_id];
+
+ spin_lock_bh(&queue->lock);
+ BUG_ON(queue_id != queue->queue_id);
+ if (queue_generation != queue->generation) {
+ ret = -ENOENT;
+ } else if (item_id >= (unsigned) queue->capacity) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ } else if (item->generation != item_generation) {
+ WARN_ON(1);
+ ret = -ENOENT;
+ } else {
+ gc_txpriv = item->txpriv;
+ gc_skb = item->skb;
+ item->skb = NULL;
+ --queue->num_pending;
+ --queue->num_queued;
+ ++queue->num_sent;
+ ++item->generation;
+ /* Do not use list_move_tail here, but list_move:
+ * the just-freed entry stays at the head of the free pool
+ * and is reused first while it is still cache-hot.
+ */
+ list_move(&item->head, &queue->free_pool);
+
+ if (queue->overfull &&
+ (queue->num_queued <= (queue->capacity >> 1))) {
+ queue->overfull = false;
+ __cw1200_queue_unlock(queue);
+ }
+ }
+ spin_unlock_bh(&queue->lock);
+
+ if (gc_skb)
+ stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);
+
+ return ret;
+}
+
+int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
+ struct sk_buff **skb,
+ const struct cw1200_txpriv **txpriv)
+{
+ int ret = 0;
+ u8 queue_generation, queue_id, item_generation, item_id;
+ struct cw1200_queue_item *item;
+ cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
+ &item_generation, &item_id);
+
+ item = &queue->pool[item_id];
+
+ spin_lock_bh(&queue->lock);
+ BUG_ON(queue_id != queue->queue_id);
+ if (queue_generation != queue->generation) {
+ ret = -ENOENT;
+ } else if (item_id >= (unsigned) queue->capacity) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ } else if (item->generation != item_generation) {
+ WARN_ON(1);
+ ret = -ENOENT;
+ } else {
+ *skb = item->skb;
+ *txpriv = &item->txpriv;
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+void cw1200_queue_lock(struct cw1200_queue *queue)
+{
+ spin_lock_bh(&queue->lock);
+ __cw1200_queue_lock(queue);
+ spin_unlock_bh(&queue->lock);
+}
+
+void cw1200_queue_unlock(struct cw1200_queue *queue)
+{
+ spin_lock_bh(&queue->lock);
+ __cw1200_queue_unlock(queue);
+ spin_unlock_bh(&queue->lock);
+}
+
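+/* Report whether any frames are pending and, if so, lower *timestamp
+ * to the oldest xmit time among pending frames other than
+ * pending_frame_id (assumed to serve as a TX-stuck watchdog reference
+ * for the caller).
+ */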
+bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
+ unsigned long *timestamp,
+ u32 pending_frame_id)
+{
+ struct cw1200_queue_item *item;
+ bool ret;
+
+ spin_lock_bh(&queue->lock);
+ ret = !list_empty(&queue->pending);
+ if (ret) {
+ list_for_each_entry(item, &queue->pending, head) {
+ if (item->packet_id != pending_frame_id)
+ if (time_before(item->xmit_timestamp,
+ *timestamp))
+ *timestamp = item->xmit_timestamp;
+ }
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
+ u32 link_id_map)
+{
+ bool empty = true;
+
+ spin_lock_bh(&stats->lock);
+ if (link_id_map == (u32)-1) {
+ empty = stats->num_queued == 0;
+ } else {
+ int i;
+ for (i = 0; i < stats->map_capacity; ++i) {
+ if (link_id_map & BIT(i)) {
+ if (stats->link_map_cache[i]) {
+ empty = false;
+ break;
+ }
+ }
+ }
+ }
+ spin_unlock_bh(&stats->lock);
+
+ return empty;
+}
diff --git a/drivers/net/wireless/cw1200/queue.h b/drivers/net/wireless/cw1200/queue.h
new file mode 100644
index 000000000000..119f9c79c14e
--- /dev/null
+++ b/drivers/net/wireless/cw1200/queue.h
@@ -0,0 +1,116 @@
+/*
+ * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_QUEUE_H_INCLUDED
+#define CW1200_QUEUE_H_INCLUDED
+
+/* private */ struct cw1200_queue_item;
+
+/* extern */ struct sk_buff;
+/* extern */ struct wsm_tx;
+/* extern */ struct cw1200_common;
+/* extern */ struct ieee80211_tx_queue_stats;
+/* extern */ struct cw1200_txpriv;
+
+/* forward */ struct cw1200_queue_stats;
+
+typedef void (*cw1200_queue_skb_dtor_t)(struct cw1200_common *priv,
+ struct sk_buff *skb,
+ const struct cw1200_txpriv *txpriv);
+
+struct cw1200_queue {
+ struct cw1200_queue_stats *stats;
+ size_t capacity;
+ size_t num_queued;
+ size_t num_pending;
+ size_t num_sent;
+ struct cw1200_queue_item *pool;
+ struct list_head queue;
+ struct list_head free_pool;
+ struct list_head pending;
+ int tx_locked_cnt;
+ int *link_map_cache;
+ bool overfull;
+ spinlock_t lock; /* Protect queue entry */
+ u8 queue_id;
+ u8 generation;
+ struct timer_list gc;
+ unsigned long ttl;
+};
+
+struct cw1200_queue_stats {
+ spinlock_t lock; /* Protect stats entry */
+ int *link_map_cache;
+ int num_queued;
+ size_t map_capacity;
+ wait_queue_head_t wait_link_id_empty;
+ cw1200_queue_skb_dtor_t skb_dtor;
+ struct cw1200_common *priv;
+};
+
+struct cw1200_txpriv {
+ u8 link_id;
+ u8 raw_link_id;
+ u8 tid;
+ u8 rate_id;
+ u8 offset;
+};
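+
+/* Typical item life cycle (as implemented in queue.c):
+ *   free_pool -> queue      via cw1200_queue_put()
+ *   queue     -> pending    via cw1200_queue_get()
+ *   pending   -> queue      via cw1200_queue_requeue() (retry)
+ *   pending   -> free_pool  via cw1200_queue_remove() (TX confirmed)
+ */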
+
+int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
+ size_t map_capacity,
+ cw1200_queue_skb_dtor_t skb_dtor,
+ struct cw1200_common *priv);
+int cw1200_queue_init(struct cw1200_queue *queue,
+ struct cw1200_queue_stats *stats,
+ u8 queue_id,
+ size_t capacity,
+ unsigned long ttl);
+int cw1200_queue_clear(struct cw1200_queue *queue);
+void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats);
+void cw1200_queue_deinit(struct cw1200_queue *queue);
+
+size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
+ u32 link_id_map);
+int cw1200_queue_put(struct cw1200_queue *queue,
+ struct sk_buff *skb,
+ struct cw1200_txpriv *txpriv);
+int cw1200_queue_get(struct cw1200_queue *queue,
+ u32 link_id_map,
+ struct wsm_tx **tx,
+ struct ieee80211_tx_info **tx_info,
+ const struct cw1200_txpriv **txpriv);
+int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id);
+int cw1200_queue_requeue_all(struct cw1200_queue *queue);
+int cw1200_queue_remove(struct cw1200_queue *queue,
+ u32 packet_id);
+int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
+ struct sk_buff **skb,
+ const struct cw1200_txpriv **txpriv);
+void cw1200_queue_lock(struct cw1200_queue *queue);
+void cw1200_queue_unlock(struct cw1200_queue *queue);
+bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
+ unsigned long *timestamp,
+ u32 pending_frame_id);
+
+bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
+ u32 link_id_map);
+
+static inline u8 cw1200_queue_get_queue_id(u32 packet_id)
+{
+ return (packet_id >> 16) & 0xFF;
+}
+
+static inline u8 cw1200_queue_get_generation(u32 packet_id)
+{
+ return (packet_id >> 8) & 0xFF;
+}
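+
+/* Assumed packet_id layout, consistent with the accessors above and
+ * with the four fields produced by cw1200_queue_parse_id():
+ *
+ *   bits 31..24  queue generation
+ *   bits 23..16  queue id
+ *   bits 15..8   item generation
+ *   bits  7..0   item index in the pool
+ */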
+
+#endif /* CW1200_QUEUE_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
new file mode 100644
index 000000000000..ee3c19037aac
--- /dev/null
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -0,0 +1,461 @@
+/*
+ * Scan implementation for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include "cw1200.h"
+#include "scan.h"
+#include "sta.h"
+#include "pm.h"
+
+static void cw1200_scan_restart_delayed(struct cw1200_common *priv);
+
+static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan)
+{
+ int ret, i;
+ int tmo = 2000;
+
+ switch (priv->join_status) {
+ case CW1200_JOIN_STATUS_PRE_STA:
+ case CW1200_JOIN_STATUS_JOINING:
+ return -EBUSY;
+ default:
+ break;
+ }
+
+ wiphy_dbg(priv->hw->wiphy, "[SCAN] hw req, type %d, %d channels, flags: 0x%x.\n",
+ scan->type, scan->num_channels, scan->flags);
+
+ for (i = 0; i < scan->num_channels; ++i)
+ tmo += scan->ch[i].max_chan_time + 10;
+
+ cancel_delayed_work_sync(&priv->clear_recent_scan_work);
+ atomic_set(&priv->scan.in_progress, 1);
+ atomic_set(&priv->recent_scan, 1);
+ cw1200_pm_stay_awake(&priv->pm_state, tmo * HZ / 1000);
+ queue_delayed_work(priv->workqueue, &priv->scan.timeout,
+ tmo * HZ / 1000);
+ ret = wsm_scan(priv, scan);
+ if (ret) {
+ atomic_set(&priv->scan.in_progress, 0);
+ cancel_delayed_work_sync(&priv->scan.timeout);
+ cw1200_scan_restart_delayed(priv);
+ }
+ return ret;
+}
+
+int cw1200_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
+ };
+ int i, ret;
+
+ if (!priv->vif)
+ return -EINVAL;
+
+ /* Scanning while acting as P2P_GO corrupts the firmware's MiniAP mode */
+ if (priv->join_status == CW1200_JOIN_STATUS_AP)
+ return -EOPNOTSUPP;
+
+ if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
+ req->n_ssids = 0;
+
+ wiphy_dbg(hw->wiphy, "[SCAN] Scan request for %d SSIDs.\n",
+ req->n_ssids);
+
+ if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
+ return -EINVAL;
+
+ frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0,
+ req->ie_len);
+ if (!frame.skb)
+ return -ENOMEM;
+
+ if (req->ie_len)
+ memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len);
+
+ /* will be unlocked in cw1200_scan_work() */
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+
+ ret = wsm_set_template_frame(priv, &frame);
+ if (!ret) {
+ /* Host wants to be the probe responder. */
+ ret = wsm_set_probe_responder(priv, true);
+ }
+ if (ret) {
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
+ dev_kfree_skb(frame.skb);
+ return ret;
+ }
+
+ wsm_lock_tx(priv);
+
+ BUG_ON(priv->scan.req);
+ priv->scan.req = req;
+ priv->scan.n_ssids = 0;
+ priv->scan.status = 0;
+ priv->scan.begin = &req->channels[0];
+ priv->scan.curr = priv->scan.begin;
+ priv->scan.end = &req->channels[req->n_channels];
+ priv->scan.output_power = priv->output_power;
+
+ for (i = 0; i < req->n_ssids; ++i) {
+ struct wsm_ssid *dst = &priv->scan.ssids[priv->scan.n_ssids];
+ memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
+ dst->length = req->ssids[i].ssid_len;
+ ++priv->scan.n_ssids;
+ }
+
+ mutex_unlock(&priv->conf_mutex);
+
+ if (frame.skb)
+ dev_kfree_skb(frame.skb);
+ queue_work(priv->workqueue, &priv->scan.work);
+ return 0;
+}
+
+void cw1200_scan_work(struct work_struct *work)
+{
+ struct cw1200_common *priv = container_of(work, struct cw1200_common,
+ scan.work);
+ struct ieee80211_channel **it;
+ struct wsm_scan scan = {
+ .type = WSM_SCAN_TYPE_FOREGROUND,
+ .flags = WSM_SCAN_FLAG_SPLIT_METHOD,
+ };
+ bool first_run = (priv->scan.begin == priv->scan.curr &&
+ priv->scan.begin != priv->scan.end);
+ int i;
+
+ if (first_run) {
+ /* Firmware gets crazy if scan request is sent
+ * when STA is joined but not yet associated.
+ * Force unjoin in this case.
+ */
+ if (cancel_delayed_work_sync(&priv->join_timeout) > 0)
+ cw1200_join_timeout(&priv->join_timeout.work);
+ }
+
+ mutex_lock(&priv->conf_mutex);
+
+ if (first_run) {
+ if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+ !(priv->powersave_mode.mode & WSM_PSM_PS)) {
+ struct wsm_set_pm pm = priv->powersave_mode;
+ pm.mode = WSM_PSM_PS;
+ cw1200_set_pm(priv, &pm);
+ } else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
+ /* FW bug: driver has to restart p2p-dev mode
+ * after scan
+ */
+ cw1200_disable_listening(priv);
+ }
+ }
+
+ if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) {
+ if (priv->scan.output_power != priv->output_power)
+ wsm_set_output_power(priv, priv->output_power * 10);
+ if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+ !(priv->powersave_mode.mode & WSM_PSM_PS))
+ cw1200_set_pm(priv, &priv->powersave_mode);
+
+ if (priv->scan.status < 0)
+ wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n",
+ priv->scan.status);
+ else if (priv->scan.req)
+ wiphy_dbg(priv->hw->wiphy,
+ "[SCAN] Scan completed.\n");
+ else
+ wiphy_dbg(priv->hw->wiphy,
+ "[SCAN] Scan canceled.\n");
+
+ priv->scan.req = NULL;
+ cw1200_scan_restart_delayed(priv);
+ wsm_unlock_tx(priv);
+ mutex_unlock(&priv->conf_mutex);
+ ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0);
+ up(&priv->scan.lock);
+ return;
+ } else {
+ struct ieee80211_channel *first = *priv->scan.curr;
+ for (it = priv->scan.curr + 1, i = 1;
+ it != priv->scan.end && i < WSM_SCAN_MAX_NUM_OF_CHANNELS;
+ ++it, ++i) {
+ if ((*it)->band != first->band)
+ break;
+ if (((*it)->flags ^ first->flags) &
+ IEEE80211_CHAN_PASSIVE_SCAN)
+ break;
+ if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ (*it)->max_power != first->max_power)
+ break;
+ }
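+ /* Channels [scan.curr, it) now form one batch: same band, same
+ * active/passive type and, for active scans, the same max_power.
+ */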
+ scan.band = first->band;
+
+ if (priv->scan.req->no_cck)
+ scan.max_tx_rate = WSM_TRANSMIT_RATE_6;
+ else
+ scan.max_tx_rate = WSM_TRANSMIT_RATE_1;
+ scan.num_probes =
+ (first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2;
+ scan.num_ssids = priv->scan.n_ssids;
+ scan.ssids = &priv->scan.ssids[0];
+ scan.num_channels = it - priv->scan.curr;
+ /* TODO: Is it optimal? */
+ scan.probe_delay = 100;
+ /* This is not stated in the WSM specification, but
+ * the FW team says the driver must not use foreground
+ * scan while joined.
+ */
+ if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+ scan.type = WSM_SCAN_TYPE_BACKGROUND;
+ scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
+ }
+ scan.ch = kzalloc(
+ sizeof(struct wsm_scan_ch) * (it - priv->scan.curr),
+ GFP_KERNEL);
+ if (!scan.ch) {
+ priv->scan.status = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < scan.num_channels; ++i) {
+ scan.ch[i].number = priv->scan.curr[i]->hw_value;
+ if (priv->scan.curr[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+ scan.ch[i].min_chan_time = 50;
+ scan.ch[i].max_chan_time = 100;
+ } else {
+ scan.ch[i].min_chan_time = 10;
+ scan.ch[i].max_chan_time = 25;
+ }
+ }
+ if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ priv->scan.output_power != first->max_power) {
+ priv->scan.output_power = first->max_power;
+ wsm_set_output_power(priv,
+ priv->scan.output_power * 10);
+ }
+ priv->scan.status = cw1200_scan_start(priv, &scan);
+ kfree(scan.ch);
+ if (priv->scan.status)
+ goto fail;
+ priv->scan.curr = it;
+ }
+ mutex_unlock(&priv->conf_mutex);
+ return;
+
+fail:
+ priv->scan.curr = priv->scan.end;
+ mutex_unlock(&priv->conf_mutex);
+ queue_work(priv->workqueue, &priv->scan.work);
+ return;
+}
+
+static void cw1200_scan_restart_delayed(struct cw1200_common *priv)
+{
+ /* FW bug: driver has to restart p2p-dev mode after scan. */
+ if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
+ cw1200_enable_listening(priv);
+ cw1200_update_filtering(priv);
+ }
+
+ if (priv->delayed_unjoin) {
+ priv->delayed_unjoin = false;
+ if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ } else if (priv->delayed_link_loss) {
+ wiphy_dbg(priv->hw->wiphy, "[CQM] Requeue BSS loss.\n");
+ priv->delayed_link_loss = 0;
+ cw1200_cqm_bssloss_sm(priv, 1, 0, 0);
+ }
+}
+
+static void cw1200_scan_complete(struct cw1200_common *priv)
+{
+ queue_delayed_work(priv->workqueue, &priv->clear_recent_scan_work, HZ);
+ if (priv->scan.direct_probe) {
+ wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe complete.\n");
+ cw1200_scan_restart_delayed(priv);
+ priv->scan.direct_probe = 0;
+ up(&priv->scan.lock);
+ wsm_unlock_tx(priv);
+ } else {
+ cw1200_scan_work(&priv->scan.work);
+ }
+}
+
+void cw1200_scan_failed_cb(struct cw1200_common *priv)
+{
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ /* STA is stopped. */
+ return;
+
+ if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) {
+ priv->scan.status = -EIO;
+ queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0);
+ }
+}
+
+
+void cw1200_scan_complete_cb(struct cw1200_common *priv,
+ struct wsm_scan_complete *arg)
+{
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ /* STA is stopped. */
+ return;
+
+ if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) {
+ priv->scan.status = 1;
+ queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0);
+ }
+}
+
+void cw1200_clear_recent_scan_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common,
+ clear_recent_scan_work.work);
+ atomic_xchg(&priv->recent_scan, 0);
+}
+
+void cw1200_scan_timeout(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, scan.timeout.work);
+ if (atomic_xchg(&priv->scan.in_progress, 0)) {
+ if (priv->scan.status > 0) {
+ priv->scan.status = 0;
+ } else if (!priv->scan.status) {
+ wiphy_warn(priv->hw->wiphy,
+ "Timeout waiting for scan complete notification.\n");
+ priv->scan.status = -ETIMEDOUT;
+ priv->scan.curr = priv->scan.end;
+ wsm_stop_scan(priv);
+ }
+ cw1200_scan_complete(priv);
+ }
+}
+
+void cw1200_probe_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, scan.probe_work.work);
+ u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id);
+ struct cw1200_queue *queue = &priv->tx_queue[queue_id];
+ const struct cw1200_txpriv *txpriv;
+ struct wsm_tx *wsm;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
+ };
+ struct wsm_ssid ssids[1] = {{
+ .length = 0,
+ } };
+ struct wsm_scan_ch ch[1] = {{
+ .min_chan_time = 0,
+ .max_chan_time = 10,
+ } };
+ struct wsm_scan scan = {
+ .type = WSM_SCAN_TYPE_FOREGROUND,
+ .num_probes = 1,
+ .probe_delay = 0,
+ .num_channels = 1,
+ .ssids = ssids,
+ .ch = ch,
+ };
+ u8 *ies;
+ size_t ies_len;
+ int ret;
+
+ wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe work.\n");
+
+ mutex_lock(&priv->conf_mutex);
+ if (down_trylock(&priv->scan.lock)) {
+ /* Scan is already in progress. Requeue self. */
+ schedule();
+ queue_delayed_work(priv->workqueue,
+ &priv->scan.probe_work, HZ / 10);
+ mutex_unlock(&priv->conf_mutex);
+ return;
+ }
+
+ /* Make sure we still have a pending probe req */
+ if (cw1200_queue_get_skb(queue, priv->pending_frame_id,
+ &frame.skb, &txpriv)) {
+ up(&priv->scan.lock);
+ mutex_unlock(&priv->conf_mutex);
+ wsm_unlock_tx(priv);
+ return;
+ }
+ wsm = (struct wsm_tx *)frame.skb->data;
+ scan.max_tx_rate = wsm->max_tx_rate;
+ scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
+ if (priv->join_status == CW1200_JOIN_STATUS_STA ||
+ priv->join_status == CW1200_JOIN_STATUS_IBSS) {
+ scan.type = WSM_SCAN_TYPE_BACKGROUND;
+ scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
+ }
+ ch[0].number = priv->channel->hw_value;
+
+ skb_pull(frame.skb, txpriv->offset);
+
+ ies = &frame.skb->data[sizeof(struct ieee80211_hdr_3addr)];
+ ies_len = frame.skb->len - sizeof(struct ieee80211_hdr_3addr);
+
+ if (ies_len) {
+ u8 *ssidie =
+ (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len);
+ if (ssidie && ssidie[1] && ssidie[1] <= sizeof(ssids[0].ssid)) {
+ u8 *nextie = &ssidie[2 + ssidie[1]];
+ /* Remove the SSID from the IE list. It has to be provided
+ * as a separate argument in the cw1200_scan_start call.
+ */
+
+ /* Store the SSID locally */
+ ssids[0].length = ssidie[1];
+ memcpy(ssids[0].ssid, &ssidie[2], ssids[0].length);
+ scan.num_ssids = 1;
+
+ /* Remove SSID from IE list */
+ ssidie[1] = 0;
+ memmove(&ssidie[2], nextie, &ies[ies_len] - nextie);
+ skb_trim(frame.skb, frame.skb->len - ssids[0].length);
+ }
+ }
+
+ /* FW bug: driver has to restart p2p-dev mode after scan */
+ if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+ cw1200_disable_listening(priv);
+ ret = wsm_set_template_frame(priv, &frame);
+ priv->scan.direct_probe = 1;
+ if (!ret) {
+ wsm_flush_tx(priv);
+ ret = cw1200_scan_start(priv, &scan);
+ }
+ mutex_unlock(&priv->conf_mutex);
+
+ skb_push(frame.skb, txpriv->offset);
+ if (!ret)
+ IEEE80211_SKB_CB(frame.skb)->flags |= IEEE80211_TX_STAT_ACK;
+ BUG_ON(cw1200_queue_remove(queue, priv->pending_frame_id));
+
+ if (ret) {
+ priv->scan.direct_probe = 0;
+ up(&priv->scan.lock);
+ wsm_unlock_tx(priv);
+ }
+
+ return;
+}
diff --git a/drivers/net/wireless/cw1200/scan.h b/drivers/net/wireless/cw1200/scan.h
new file mode 100644
index 000000000000..5a8296ccfa82
--- /dev/null
+++ b/drivers/net/wireless/cw1200/scan.h
@@ -0,0 +1,56 @@
+/*
+ * Scan interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SCAN_H_INCLUDED
+#define SCAN_H_INCLUDED
+
+#include <linux/semaphore.h>
+#include "wsm.h"
+
+/* external */ struct sk_buff;
+/* external */ struct cfg80211_scan_request;
+/* external */ struct ieee80211_channel;
+/* external */ struct ieee80211_hw;
+/* external */ struct work_struct;
+
+struct cw1200_scan {
+ struct semaphore lock;
+ struct work_struct work;
+ struct delayed_work timeout;
+ struct cfg80211_scan_request *req;
+ struct ieee80211_channel **begin;
+ struct ieee80211_channel **curr;
+ struct ieee80211_channel **end;
+ struct wsm_ssid ssids[WSM_SCAN_MAX_NUM_OF_SSIDS];
+ int output_power;
+ int n_ssids;
+ int status;
+ atomic_t in_progress;
+ /* Direct probe requests workaround */
+ struct delayed_work probe_work;
+ int direct_probe;
+};
+
+int cw1200_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void cw1200_scan_work(struct work_struct *work);
+void cw1200_scan_timeout(struct work_struct *work);
+void cw1200_clear_recent_scan_work(struct work_struct *work);
+void cw1200_scan_complete_cb(struct cw1200_common *priv,
+ struct wsm_scan_complete *arg);
+void cw1200_scan_failed_cb(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* Raw probe requests TX workaround */
+void cw1200_probe_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
new file mode 100644
index 000000000000..7365674366f4
--- /dev/null
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -0,0 +1,2403 @@
+/*
+ * Mac80211 STA API for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "cw1200.h"
+#include "sta.h"
+#include "fwio.h"
+#include "bh.h"
+#include "debug.h"
+
+#ifndef ERP_INFO_BYTE_OFFSET
+#define ERP_INFO_BYTE_OFFSET 2
+#endif
+
+static void cw1200_do_join(struct cw1200_common *priv);
+static void cw1200_do_unjoin(struct cw1200_common *priv);
+
+static int cw1200_upload_beacon(struct cw1200_common *priv);
+static int cw1200_upload_pspoll(struct cw1200_common *priv);
+static int cw1200_upload_null(struct cw1200_common *priv);
+static int cw1200_upload_qosnull(struct cw1200_common *priv);
+static int cw1200_start_ap(struct cw1200_common *priv);
+static int cw1200_update_beaconing(struct cw1200_common *priv);
+static int cw1200_enable_beaconing(struct cw1200_common *priv,
+ bool enable);
+static void __cw1200_sta_notify(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd,
+ int link_id);
+static int __cw1200_flush(struct cw1200_common *priv, bool drop);
+
+static inline void __cw1200_free_event_queue(struct list_head *list)
+{
+ struct cw1200_wsm_event *event, *tmp;
+ list_for_each_entry_safe(event, tmp, list, link) {
+ list_del(&event->link);
+ kfree(event);
+ }
+}
+
+/* ******************************************************************** */
+/* STA API */
+
+int cw1200_start(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ int ret = 0;
+
+ cw1200_pm_stay_awake(&priv->pm_state, HZ);
+
+ mutex_lock(&priv->conf_mutex);
+
+ /* default EDCA */
+ WSM_EDCA_SET(&priv->edca, 0, 0x0002, 0x0003, 0x0007, 47, 0xc8, false);
+ WSM_EDCA_SET(&priv->edca, 1, 0x0002, 0x0007, 0x000f, 94, 0xc8, false);
+ WSM_EDCA_SET(&priv->edca, 2, 0x0003, 0x000f, 0x03ff, 0, 0xc8, false);
+ WSM_EDCA_SET(&priv->edca, 3, 0x0007, 0x000f, 0x03ff, 0, 0xc8, false);
+ ret = wsm_set_edca_params(priv, &priv->edca);
+ if (ret)
+ goto out;
+
+ ret = cw1200_set_uapsd_param(priv, &priv->edca);
+ if (ret)
+ goto out;
+
+ priv->setbssparams_done = false;
+
+ memcpy(priv->mac_addr, dev->wiphy->perm_addr, ETH_ALEN);
+ priv->mode = NL80211_IFTYPE_MONITOR;
+ priv->wep_default_key_id = -1;
+
+ priv->cqm_beacon_loss_count = 10;
+
+ ret = cw1200_setup_mac(priv);
+ if (ret)
+ goto out;
+
+out:
+ mutex_unlock(&priv->conf_mutex);
+ return ret;
+}
+
+void cw1200_stop(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ LIST_HEAD(list);
+ int i;
+
+ wsm_lock_tx(priv);
+
+ while (down_trylock(&priv->scan.lock)) {
+ /* Scan is in progress. Force it to stop. */
+ priv->scan.req = NULL;
+ schedule();
+ }
+ up(&priv->scan.lock);
+
+ cancel_delayed_work_sync(&priv->scan.probe_work);
+ cancel_delayed_work_sync(&priv->scan.timeout);
+ cancel_delayed_work_sync(&priv->clear_recent_scan_work);
+ cancel_delayed_work_sync(&priv->join_timeout);
+ cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+ cancel_work_sync(&priv->unjoin_work);
+ cancel_delayed_work_sync(&priv->link_id_gc_work);
+ flush_workqueue(priv->workqueue);
+ del_timer_sync(&priv->mcast_timeout);
+ mutex_lock(&priv->conf_mutex);
+ priv->mode = NL80211_IFTYPE_UNSPECIFIED;
+ priv->listening = false;
+
+ spin_lock(&priv->event_queue_lock);
+ list_splice_init(&priv->event_queue, &list);
+ spin_unlock(&priv->event_queue_lock);
+ __cw1200_free_event_queue(&list);
+
+
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ priv->join_pending = false;
+
+ for (i = 0; i < 4; i++)
+ cw1200_queue_clear(&priv->tx_queue[i]);
+ mutex_unlock(&priv->conf_mutex);
+ tx_policy_clean(priv);
+
+ /* HACK! */
+ if (atomic_xchg(&priv->tx_lock, 1) != 1)
+ pr_debug("[STA] TX is force-unlocked due to stop request.\n");
+
+ wsm_unlock_tx(priv);
+ atomic_xchg(&priv->tx_lock, 0); /* for recovery to work */
+}
+
+static int cw1200_bssloss_mitigation = 1;
+module_param(cw1200_bssloss_mitigation, int, 0644);
+MODULE_PARM_DESC(cw1200_bssloss_mitigation, "BSS Loss mitigation. 0 == disabled, 1 == enabled (default)");
+
+
+void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
+ int init, int good, int bad)
+{
+ int tx = 0;
+
+ priv->delayed_link_loss = 0;
+ cancel_work_sync(&priv->bss_params_work);
+
+ pr_debug("[STA] CQM BSSLOSS_SM: state: %d init %d good %d bad: %d txlock: %d uj: %d\n",
+ priv->bss_loss_state,
+ init, good, bad,
+ atomic_read(&priv->tx_lock),
+ priv->delayed_unjoin);
+
+ /* If we have a pending unjoin */
+ if (priv->delayed_unjoin)
+ return;
+
+ if (init) {
+ queue_delayed_work(priv->workqueue,
+ &priv->bss_loss_work,
+ HZ);
+ priv->bss_loss_state = 0;
+
+ /* Skip the confirmation procedure in the P2P case */
+ if (!priv->vif->p2p && !atomic_read(&priv->tx_lock))
+ tx = 1;
+ } else if (good) {
+ cancel_delayed_work_sync(&priv->bss_loss_work);
+ priv->bss_loss_state = 0;
+ queue_work(priv->workqueue, &priv->bss_params_work);
+ } else if (bad) {
+ /* XXX Should we just keep going until we time out? */
+ if (priv->bss_loss_state < 3)
+ tx = 1;
+ } else {
+ cancel_delayed_work_sync(&priv->bss_loss_work);
+ priv->bss_loss_state = 0;
+ }
+
+ /* Bypass mitigation if it's disabled */
+ if (!cw1200_bssloss_mitigation)
+ tx = 0;
+
+ /* Spit out a NULL packet to our AP if necessary */
+ if (tx) {
+ struct sk_buff *skb;
+
+ priv->bss_loss_state++;
+
+ skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ WARN_ON(!skb);
+ if (skb)
+ cw1200_tx(priv->hw, NULL, skb);
+ }
+}
+
+int cw1200_add_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif)
+{
+ int ret;
+ struct cw1200_common *priv = dev->priv;
+ /* __le32 auto_calibration_mode = __cpu_to_le32(1); */
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
+ mutex_lock(&priv->conf_mutex);
+
+ if (priv->mode != NL80211_IFTYPE_MONITOR) {
+ mutex_unlock(&priv->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ priv->mode = vif->type;
+ break;
+ default:
+ mutex_unlock(&priv->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ priv->vif = vif;
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
+ ret = cw1200_setup_mac(priv);
+ /* Enable auto-calibration */
+ /* Exception in subsequent channel switch; disabled.
+ * wsm_write_mib(priv, WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE,
+ * &auto_calibration_mode, sizeof(auto_calibration_mode));
+ */
+
+ mutex_unlock(&priv->conf_mutex);
+ return ret;
+}
+
+void cw1200_remove_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif)
+{
+ struct cw1200_common *priv = dev->priv;
+ struct wsm_reset reset = {
+ .reset_statistics = true,
+ };
+ int i;
+
+ mutex_lock(&priv->conf_mutex);
+ switch (priv->join_status) {
+ case CW1200_JOIN_STATUS_JOINING:
+ case CW1200_JOIN_STATUS_PRE_STA:
+ case CW1200_JOIN_STATUS_STA:
+ case CW1200_JOIN_STATUS_IBSS:
+ wsm_lock_tx(priv);
+ if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ break;
+ case CW1200_JOIN_STATUS_AP:
+ for (i = 0; priv->link_id_map; ++i) {
+ if (priv->link_id_map & BIT(i)) {
+ reset.link_id = i;
+ wsm_reset(priv, &reset);
+ priv->link_id_map &= ~BIT(i);
+ }
+ }
+ memset(priv->link_id_db, 0, sizeof(priv->link_id_db));
+ priv->sta_asleep_mask = 0;
+ priv->enable_beacon = false;
+ priv->tx_multicast = false;
+ priv->aid0_bit_set = false;
+ priv->buffered_multicasts = false;
+ priv->pspoll_mask = 0;
+ reset.link_id = 0;
+ wsm_reset(priv, &reset);
+ break;
+ case CW1200_JOIN_STATUS_MONITOR:
+ cw1200_update_listening(priv, false);
+ break;
+ default:
+ break;
+ }
+ priv->vif = NULL;
+ priv->mode = NL80211_IFTYPE_MONITOR;
+ memset(priv->mac_addr, 0, ETH_ALEN);
+ memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo));
+ cw1200_free_keys(priv);
+ cw1200_setup_mac(priv);
+ priv->listening = false;
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ if (!__cw1200_flush(priv, true))
+ wsm_unlock_tx(priv);
+
+ mutex_unlock(&priv->conf_mutex);
+}
+
+int cw1200_change_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type,
+ bool p2p)
+{
+ int ret = 0;
+ pr_debug("change_interface new: %d (%d), old: %d (%d)\n", new_type,
+ p2p, vif->type, vif->p2p);
+
+ if (new_type != vif->type || vif->p2p != p2p) {
+ cw1200_remove_interface(dev, vif);
+ vif->type = new_type;
+ vif->p2p = p2p;
+ ret = cw1200_add_interface(dev, vif);
+ }
+
+ return ret;
+}
+
+int cw1200_config(struct ieee80211_hw *dev, u32 changed)
+{
+ int ret = 0;
+ struct cw1200_common *priv = dev->priv;
+ struct ieee80211_conf *conf = &dev->conf;
+
+ pr_debug("CONFIG CHANGED: %08x\n", changed);
+
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+ /* TODO: IEEE80211_CONF_CHANGE_QOS */
+ /* TODO: IEEE80211_CONF_CHANGE_LISTEN_INTERVAL */
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ priv->output_power = conf->power_level;
+ pr_debug("[STA] TX power: %d\n", priv->output_power);
+ wsm_set_output_power(priv, priv->output_power * 10);
+ }
+
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) &&
+ (priv->channel != conf->chandef.chan)) {
+ struct ieee80211_channel *ch = conf->chandef.chan;
+ struct wsm_switch_channel channel = {
+ .channel_number = ch->hw_value,
+ };
+ pr_debug("[STA] Freq %d (wsm ch: %d).\n",
+ ch->center_freq, ch->hw_value);
+
+ /* __cw1200_flush() implicitly locks tx, if successful */
+ if (!__cw1200_flush(priv, false)) {
+ if (!wsm_switch_channel(priv, &channel)) {
+ ret = wait_event_timeout(priv->channel_switch_done,
+ !priv->channel_switch_in_progress,
+ 3 * HZ);
+ if (ret) {
+ /* Already unlocks if successful */
+ priv->channel = ch;
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ }
+ } else {
+ /* Unlock if switch channel fails */
+ wsm_unlock_tx(priv);
+ }
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (!(conf->flags & IEEE80211_CONF_PS))
+ priv->powersave_mode.mode = WSM_PSM_ACTIVE;
+ else if (conf->dynamic_ps_timeout <= 0)
+ priv->powersave_mode.mode = WSM_PSM_PS;
+ else
+ priv->powersave_mode.mode = WSM_PSM_FAST_PS;
+
+ /* Firmware requires the value of this 1-byte field to be
+ * specified in units of 500us. Values at or above the 128ms
+ * threshold are not supported and are clamped below.
+ */
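+ /* For example, a dynamic_ps_timeout of 100 ms becomes 100 << 1 = 200
+ * (i.e. 200 * 500us); timeouts of 128 ms or more saturate to 0xFF.
+ */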
+ if (conf->dynamic_ps_timeout >= 0x80)
+ priv->powersave_mode.fast_psm_idle_period = 0xFF;
+ else
+ priv->powersave_mode.fast_psm_idle_period =
+ conf->dynamic_ps_timeout << 1;
+
+ if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+ priv->bss_params.aid)
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ /* TBD: this flag only indicates that a monitor
+ * interface is present -- use it, for example, to
+ * decide whether to calculate per-packet timestamps;
+ * do not use it as a substitute for filter flags!
+ */
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ struct wsm_operational_mode mode = {
+ .power_mode = cw1200_power_mode,
+ .disable_more_flag_usage = true,
+ };
+
+ wsm_lock_tx(priv);
+ /* Disable p2p-dev mode forced by TX request */
+ if ((priv->join_status == CW1200_JOIN_STATUS_MONITOR) &&
+ (conf->flags & IEEE80211_CONF_IDLE) &&
+ !priv->listening) {
+ cw1200_disable_listening(priv);
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ }
+ wsm_set_operational_mode(priv, &mode);
+ wsm_unlock_tx(priv);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ pr_debug("[STA] Retry limits: %d (long), %d (short).\n",
+ conf->long_frame_max_tx_count,
+ conf->short_frame_max_tx_count);
+ spin_lock_bh(&priv->tx_policy_cache.lock);
+ priv->long_frame_max_tx_count = conf->long_frame_max_tx_count;
+ priv->short_frame_max_tx_count =
+ (conf->short_frame_max_tx_count < 0x0F) ?
+ conf->short_frame_max_tx_count : 0x0F;
+ priv->hw->max_rate_tries = priv->short_frame_max_tx_count;
+ spin_unlock_bh(&priv->tx_policy_cache.lock);
+ }
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
+ return ret;
+}
+
+void cw1200_update_filtering(struct cw1200_common *priv)
+{
+ int ret;
+ bool bssid_filtering = !priv->rx_filter.bssid;
+ bool is_p2p = priv->vif && priv->vif->p2p;
+ bool is_sta = priv->vif && NL80211_IFTYPE_STATION == priv->vif->type;
+
+ static struct wsm_beacon_filter_control bf_ctrl;
+ static struct wsm_mib_beacon_filter_table bf_tbl = {
+ .entry[0].ie_id = WLAN_EID_VENDOR_SPECIFIC,
+ .entry[0].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+ WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+ WSM_BEACON_FILTER_IE_HAS_APPEARED,
+ .entry[0].oui[0] = 0x50,
+ .entry[0].oui[1] = 0x6F,
+ .entry[0].oui[2] = 0x9A,
+ .entry[1].ie_id = WLAN_EID_HT_OPERATION,
+ .entry[1].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+ WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+ WSM_BEACON_FILTER_IE_HAS_APPEARED,
+ .entry[2].ie_id = WLAN_EID_ERP_INFO,
+ .entry[2].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+ WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+ WSM_BEACON_FILTER_IE_HAS_APPEARED,
+ };
+
+ if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE)
+ return;
+ else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+ bssid_filtering = false;
+
+ if (priv->disable_beacon_filter) {
+ bf_ctrl.enabled = 0;
+ bf_ctrl.bcn_count = 1;
+ bf_tbl.num = __cpu_to_le32(0);
+ } else if (is_p2p || !is_sta) {
+ bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE |
+ WSM_BEACON_FILTER_AUTO_ERP;
+ bf_ctrl.bcn_count = 0;
+ bf_tbl.num = __cpu_to_le32(2);
+ } else {
+ bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE;
+ bf_ctrl.bcn_count = 0;
+ bf_tbl.num = __cpu_to_le32(3);
+ }
+
+ /* When acting as a P2P client connected to a P2P GO, turn off the
+ * BSSID filter so that frames from other P2P devices are received.
+ *
+ * WARNING: FW dependency!
+ * This can only be used with FW WSM371 and its successors.
+ * Even with the BSSID filter turned off, that FW version still
+ * blocks most of the unwanted frames.
+ */
+ if (is_p2p)
+ bssid_filtering = false;
+
+ ret = wsm_set_rx_filter(priv, &priv->rx_filter);
+ if (!ret)
+ ret = wsm_set_beacon_filter_table(priv, &bf_tbl);
+ if (!ret)
+ ret = wsm_beacon_filter_control(priv, &bf_ctrl);
+ if (!ret)
+ ret = wsm_set_bssid_filtering(priv, bssid_filtering);
+ if (!ret)
+ ret = wsm_set_multicast_filter(priv, &priv->multicast_filter);
+ if (ret)
+ wiphy_err(priv->hw->wiphy,
+ "Update filtering failed: %d.\n", ret);
+ return;
+}
+
+void cw1200_update_filtering_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common,
+ update_filtering_work);
+
+ cw1200_update_filtering(priv);
+}
+
+void cw1200_set_beacon_wakeup_period_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common,
+ set_beacon_wakeup_period_work);
+
+ wsm_set_beacon_wakeup_period(priv,
+ priv->beacon_int * priv->join_dtim_period >
+ MAX_BEACON_SKIP_TIME_MS ? 1 :
+ priv->join_dtim_period, 0);
+}
+
+u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ static u8 broadcast_ipv6[ETH_ALEN] = {
+ 0x33, 0x33, 0x00, 0x00, 0x00, 0x01
+ };
+ static u8 broadcast_ipv4[ETH_ALEN] = {
+ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01
+ };
+ struct cw1200_common *priv = hw->priv;
+ struct netdev_hw_addr *ha;
+ int count = 0;
+
+ /* Disable multicast filtering */
+ priv->has_multicast_subscription = false;
+ memset(&priv->multicast_filter, 0x00, sizeof(priv->multicast_filter));
+
+ if (netdev_hw_addr_list_count(mc_list) > WSM_MAX_GRP_ADDRTABLE_ENTRIES)
+ return 0;
+
+ /* Enable if requested */
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ pr_debug("[STA] multicast: %pM\n", ha->addr);
+ memcpy(&priv->multicast_filter.macaddrs[count],
+ ha->addr, ETH_ALEN);
+ if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) &&
+ memcmp(ha->addr, broadcast_ipv6, ETH_ALEN))
+ priv->has_multicast_subscription = true;
+ count++;
+ }
+
+ if (count) {
+ priv->multicast_filter.enable = __cpu_to_le32(1);
+ priv->multicast_filter.num_addrs = __cpu_to_le32(count);
+ }
+
+ return netdev_hw_addr_list_count(mc_list);
+}
+
+void cw1200_configure_filter(struct ieee80211_hw *dev,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct cw1200_common *priv = dev->priv;
+ bool listening = !!(*total_flags &
+ (FIF_PROMISC_IN_BSS |
+ FIF_OTHER_BSS |
+ FIF_BCN_PRBRESP_PROMISC |
+ FIF_PROBE_REQ));
+
+ *total_flags &= FIF_PROMISC_IN_BSS |
+ FIF_OTHER_BSS |
+ FIF_FCSFAIL |
+ FIF_BCN_PRBRESP_PROMISC |
+ FIF_PROBE_REQ;
+
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+
+ priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS)
+ ? 1 : 0;
+ priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS |
+ FIF_PROBE_REQ)) ? 1 : 0;
+ priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 1 : 0;
+ priv->disable_beacon_filter = !(*total_flags &
+ (FIF_BCN_PRBRESP_PROMISC |
+ FIF_PROMISC_IN_BSS |
+ FIF_PROBE_REQ));
+ if (priv->listening != listening) {
+ priv->listening = listening;
+ wsm_lock_tx(priv);
+ cw1200_update_listening(priv, listening);
+ wsm_unlock_tx(priv);
+ }
+ cw1200_update_filtering(priv);
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
+}
+
+int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct cw1200_common *priv = dev->priv;
+ int ret = 0;
+ /* To prevent re-applying the PM request OID again and again */
+ u16 old_uapsd_flags;
+
+ mutex_lock(&priv->conf_mutex);
+
+ if (queue < dev->queues) {
+ old_uapsd_flags = le16_to_cpu(priv->uapsd_info.uapsd_flags);
+
+ WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0);
+ ret = wsm_set_tx_queue_params(priv,
+ &priv->tx_queue_params.params[queue], queue);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ WSM_EDCA_SET(&priv->edca, queue, params->aifs,
+ params->cw_min, params->cw_max,
+ params->txop, 0xc8,
+ params->uapsd);
+ ret = wsm_set_edca_params(priv, &priv->edca);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (priv->mode == NL80211_IFTYPE_STATION) {
+ ret = cw1200_set_uapsd_param(priv, &priv->edca);
+ if (!ret && priv->setbssparams_done &&
+ (priv->join_status == CW1200_JOIN_STATUS_STA) &&
+ (old_uapsd_flags != le16_to_cpu(priv->uapsd_info.uapsd_flags)))
+ ret = cw1200_set_pm(priv, &priv->powersave_mode);
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&priv->conf_mutex);
+ return ret;
+}
+
+int cw1200_get_stats(struct ieee80211_hw *dev,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct cw1200_common *priv = dev->priv;
+
+ memcpy(stats, &priv->stats, sizeof(*stats));
+ return 0;
+}
+
+int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg)
+{
+ struct wsm_set_pm pm = *arg;
+
+ if (priv->uapsd_info.uapsd_flags != 0)
+ pm.mode &= ~WSM_PSM_FAST_PS_FLAG;
+
+ if (memcmp(&pm, &priv->firmware_ps_mode,
+ sizeof(struct wsm_set_pm))) {
+ priv->firmware_ps_mode = pm;
+ return wsm_set_pm(priv, &pm);
+ } else {
+ return 0;
+ }
+}
+
+int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int ret = -EOPNOTSUPP;
+ struct cw1200_common *priv = dev->priv;
+ struct ieee80211_key_seq seq;
+
+ mutex_lock(&priv->conf_mutex);
+
+ if (cmd == SET_KEY) {
+ u8 *peer_addr = NULL;
+ int pairwise = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) ?
+ 1 : 0;
+ int idx = cw1200_alloc_key(priv);
+ struct wsm_add_key *wsm_key = &priv->keys[idx];
+
+ if (idx < 0) {
+ ret = -EINVAL;
+ goto finally;
+ }
+
+ if (sta)
+ peer_addr = sta->addr;
+
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (key->keylen > 16) {
+ cw1200_free_key(priv, idx);
+ ret = -EINVAL;
+ goto finally;
+ }
+
+ if (pairwise) {
+ wsm_key->type = WSM_KEY_TYPE_WEP_PAIRWISE;
+ memcpy(wsm_key->wep_pairwise.peer,
+ peer_addr, ETH_ALEN);
+ memcpy(wsm_key->wep_pairwise.keydata,
+ &key->key[0], key->keylen);
+ wsm_key->wep_pairwise.keylen = key->keylen;
+ } else {
+ wsm_key->type = WSM_KEY_TYPE_WEP_DEFAULT;
+ memcpy(wsm_key->wep_group.keydata,
+ &key->key[0], key->keylen);
+ wsm_key->wep_group.keylen = key->keylen;
+ wsm_key->wep_group.keyid = key->keyidx;
+ }
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+ if (pairwise) {
+ wsm_key->type = WSM_KEY_TYPE_TKIP_PAIRWISE;
+ memcpy(wsm_key->tkip_pairwise.peer,
+ peer_addr, ETH_ALEN);
+ memcpy(wsm_key->tkip_pairwise.keydata,
+ &key->key[0], 16);
+ memcpy(wsm_key->tkip_pairwise.tx_mic_key,
+ &key->key[16], 8);
+ memcpy(wsm_key->tkip_pairwise.rx_mic_key,
+ &key->key[24], 8);
+ } else {
+ size_t mic_offset =
+ (priv->mode == NL80211_IFTYPE_AP) ?
+ 16 : 24;
+ wsm_key->type = WSM_KEY_TYPE_TKIP_GROUP;
+ memcpy(wsm_key->tkip_group.keydata,
+ &key->key[0], 16);
+ memcpy(wsm_key->tkip_group.rx_mic_key,
+ &key->key[mic_offset], 8);
+
+ wsm_key->tkip_group.rx_seqnum[0] = seq.tkip.iv16 & 0xff;
+ wsm_key->tkip_group.rx_seqnum[1] = (seq.tkip.iv16 >> 8) & 0xff;
+ wsm_key->tkip_group.rx_seqnum[2] = seq.tkip.iv32 & 0xff;
+ wsm_key->tkip_group.rx_seqnum[3] = (seq.tkip.iv32 >> 8) & 0xff;
+ wsm_key->tkip_group.rx_seqnum[4] = (seq.tkip.iv32 >> 16) & 0xff;
+ wsm_key->tkip_group.rx_seqnum[5] = (seq.tkip.iv32 >> 24) & 0xff;
+ wsm_key->tkip_group.rx_seqnum[6] = 0;
+ wsm_key->tkip_group.rx_seqnum[7] = 0;
+
+ wsm_key->tkip_group.keyid = key->keyidx;
+ }
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+ if (pairwise) {
+ wsm_key->type = WSM_KEY_TYPE_AES_PAIRWISE;
+ memcpy(wsm_key->aes_pairwise.peer,
+ peer_addr, ETH_ALEN);
+ memcpy(wsm_key->aes_pairwise.keydata,
+ &key->key[0], 16);
+ } else {
+ wsm_key->type = WSM_KEY_TYPE_AES_GROUP;
+ memcpy(wsm_key->aes_group.keydata,
+ &key->key[0], 16);
+
+ wsm_key->aes_group.rx_seqnum[0] = seq.ccmp.pn[5];
+ wsm_key->aes_group.rx_seqnum[1] = seq.ccmp.pn[4];
+ wsm_key->aes_group.rx_seqnum[2] = seq.ccmp.pn[3];
+ wsm_key->aes_group.rx_seqnum[3] = seq.ccmp.pn[2];
+ wsm_key->aes_group.rx_seqnum[4] = seq.ccmp.pn[1];
+ wsm_key->aes_group.rx_seqnum[5] = seq.ccmp.pn[0];
+ wsm_key->aes_group.rx_seqnum[6] = 0;
+ wsm_key->aes_group.rx_seqnum[7] = 0;
+ wsm_key->aes_group.keyid = key->keyidx;
+ }
+ break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ if (pairwise) {
+ wsm_key->type = WSM_KEY_TYPE_WAPI_PAIRWISE;
+ memcpy(wsm_key->wapi_pairwise.peer,
+ peer_addr, ETH_ALEN);
+ memcpy(wsm_key->wapi_pairwise.keydata,
+ &key->key[0], 16);
+ memcpy(wsm_key->wapi_pairwise.mic_key,
+ &key->key[16], 16);
+ wsm_key->wapi_pairwise.keyid = key->keyidx;
+ } else {
+ wsm_key->type = WSM_KEY_TYPE_WAPI_GROUP;
+ memcpy(wsm_key->wapi_group.keydata,
+ &key->key[0], 16);
+ memcpy(wsm_key->wapi_group.mic_key,
+ &key->key[16], 16);
+ wsm_key->wapi_group.keyid = key->keyidx;
+ }
+ break;
+ default:
+ pr_warn("Unhandled key type %d\n", key->cipher);
+ cw1200_free_key(priv, idx);
+ ret = -EOPNOTSUPP;
+ goto finally;
+ }
+ ret = wsm_add_key(priv, wsm_key);
+ if (!ret)
+ key->hw_key_idx = idx;
+ else
+ cw1200_free_key(priv, idx);
+ } else if (cmd == DISABLE_KEY) {
+ struct wsm_remove_key wsm_key = {
+ .index = key->hw_key_idx,
+ };
+
+ if (wsm_key.index > WSM_KEY_MAX_INDEX) {
+ ret = -EINVAL;
+ goto finally;
+ }
+
+ cw1200_free_key(priv, wsm_key.index);
+ ret = wsm_remove_key(priv, &wsm_key);
+ } else {
+ pr_warn("Unhandled key command %d\n", cmd);
+ }
+
+finally:
+ mutex_unlock(&priv->conf_mutex);
+ return ret;
+}
+
+void cw1200_wep_key_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, wep_key_work);
+ u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id);
+ struct cw1200_queue *queue = &priv->tx_queue[queue_id];
+ __le32 wep_default_key_id = __cpu_to_le32(
+ priv->wep_default_key_id);
+
+ pr_debug("[STA] Setting default WEP key: %d\n",
+ priv->wep_default_key_id);
+ wsm_flush_tx(priv);
+ wsm_write_mib(priv, WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+ &wep_default_key_id, sizeof(wep_default_key_id));
+ cw1200_queue_requeue(queue, priv->pending_frame_id);
+ wsm_unlock_tx(priv);
+}
+
+int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ int ret = 0;
+ __le32 val32;
+ struct cw1200_common *priv = hw->priv;
+
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ return 0;
+
+ if (value != (u32) -1)
+ val32 = __cpu_to_le32(value);
+ else
+ val32 = 0; /* disabled */
+
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
+ /* device is down, can _not_ set threshold */
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (priv->rts_threshold == value)
+ goto out;
+
+ pr_debug("[STA] Setting RTS threshold: %d\n",
+ priv->rts_threshold);
+
+ /* mutex_lock(&priv->conf_mutex); */
+ ret = wsm_write_mib(priv, WSM_MIB_ID_DOT11_RTS_THRESHOLD,
+ &val32, sizeof(val32));
+ if (!ret)
+ priv->rts_threshold = value;
+ /* mutex_unlock(&priv->conf_mutex); */
+
+out:
+ return ret;
+}
+
+/* If successful, LOCKS the TX queue! */
+static int __cw1200_flush(struct cw1200_common *priv, bool drop)
+{
+ int i, ret;
+
+ for (;;) {
+ /* TODO: correct flush handling is required for dev_stop.
+ * Temporary workaround: a 2 second timeout.
+ */
+ if (drop) {
+ for (i = 0; i < 4; ++i)
+ cw1200_queue_clear(&priv->tx_queue[i]);
+ } else {
+ ret = wait_event_timeout(
+ priv->tx_queue_stats.wait_link_id_empty,
+ cw1200_queue_stats_is_empty(
+ &priv->tx_queue_stats, -1),
+ 2 * HZ);
+ }
+
+ if (!drop && ret <= 0) {
+ ret = -ETIMEDOUT;
+ break;
+ } else {
+ ret = 0;
+ }
+
+ wsm_lock_tx(priv);
+ if (!cw1200_queue_stats_is_empty(&priv->tx_queue_stats, -1)) {
+ /* Highly unlikely: WSM requeued frames. */
+ wsm_unlock_tx(priv);
+ continue;
+ }
+ break;
+ }
+ return ret;
+}
+
+void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+{
+ struct cw1200_common *priv = hw->priv;
+
+ switch (priv->mode) {
+ case NL80211_IFTYPE_MONITOR:
+ drop = true;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!priv->enable_beacon)
+ drop = true;
+ break;
+ }
+
+ if (!__cw1200_flush(priv, drop))
+ wsm_unlock_tx(priv);
+
+ return;
+}
+
+/* ******************************************************************** */
+/* WSM callbacks */
+
+void cw1200_free_event_queue(struct cw1200_common *priv)
+{
+ LIST_HEAD(list);
+
+ spin_lock(&priv->event_queue_lock);
+ list_splice_init(&priv->event_queue, &list);
+ spin_unlock(&priv->event_queue_lock);
+
+ __cw1200_free_event_queue(&list);
+}
+
+void cw1200_event_handler(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, event_handler);
+ struct cw1200_wsm_event *event;
+ LIST_HEAD(list);
+
+ spin_lock(&priv->event_queue_lock);
+ list_splice_init(&priv->event_queue, &list);
+ spin_unlock(&priv->event_queue_lock);
+
+ list_for_each_entry(event, &list, link) {
+ switch (event->evt.id) {
+ case WSM_EVENT_ERROR:
+ pr_err("Unhandled WSM Error from LMAC\n");
+ break;
+ case WSM_EVENT_BSS_LOST:
+ pr_debug("[CQM] BSS lost.\n");
+ cancel_work_sync(&priv->unjoin_work);
+ if (!down_trylock(&priv->scan.lock)) {
+ cw1200_cqm_bssloss_sm(priv, 1, 0, 0);
+ up(&priv->scan.lock);
+ } else {
+ /* Scan is in progress. Delay reporting.
+ * Scan complete will trigger bss_loss_work
+ */
+ priv->delayed_link_loss = 1;
+ /* Also start a watchdog. */
+ queue_delayed_work(priv->workqueue,
+ &priv->bss_loss_work, 5*HZ);
+ }
+ break;
+ case WSM_EVENT_BSS_REGAINED:
+ pr_debug("[CQM] BSS regained.\n");
+ cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+ cancel_work_sync(&priv->unjoin_work);
+ break;
+ case WSM_EVENT_RADAR_DETECTED:
+ wiphy_info(priv->hw->wiphy, "radar pulse detected\n");
+ break;
+ case WSM_EVENT_RCPI_RSSI:
+ {
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110
+ */
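+ /* e.g. a raw RCPI of 120 maps to 120 / 2 - 110 = -50 dBm */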
+ int rcpi_rssi = (int)(event->evt.data & 0xFF);
+ int cqm_evt;
+ if (priv->cqm_use_rssi)
+ rcpi_rssi = (s8)rcpi_rssi;
+ else
+ rcpi_rssi = rcpi_rssi / 2 - 110;
+
+ cqm_evt = (rcpi_rssi <= priv->cqm_rssi_thold) ?
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ pr_debug("[CQM] RSSI event: %d.\n", rcpi_rssi);
+ ieee80211_cqm_rssi_notify(priv->vif, cqm_evt,
+ GFP_KERNEL);
+ break;
+ }
+ case WSM_EVENT_BT_INACTIVE:
+ pr_warn("Unhandled BT INACTIVE from LMAC\n");
+ break;
+ case WSM_EVENT_BT_ACTIVE:
+ pr_warn("Unhandled BT ACTIVE from LMAC\n");
+ break;
+ }
+ }
+ __cw1200_free_event_queue(&list);
+}
+
+void cw1200_bss_loss_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, bss_loss_work.work);
+
+ pr_debug("[CQM] Reporting connection loss.\n");
+ wsm_lock_tx(priv);
+ if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+}
+
+void cw1200_bss_params_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, bss_params_work);
+ mutex_lock(&priv->conf_mutex);
+
+ priv->bss_params.reset_beacon_loss = 1;
+ wsm_set_bss_params(priv, &priv->bss_params);
+ priv->bss_params.reset_beacon_loss = 0;
+
+ mutex_unlock(&priv->conf_mutex);
+}
+
+/* ******************************************************************** */
+/* Internal API */
+
+/* Parse the SDD file to extract the listen interval and
+ * PTA-related information.
+ * The SDD is a sequence of TLVs: u8 id, u8 len, u8 data[]
+ */
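+/* Example (hypothetical): an SDD_PTA_CFG_ELT_ID element with len = 4
+ * would carry a PTA enable word in data bytes 0-1 and, in bytes 2-3,
+ * a word whose bits 7..11 hold the listen interval (see the parsing
+ * below).
+ */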
+static int cw1200_parse_sdd_file(struct cw1200_common *priv)
+{
+ const u8 *p = priv->sdd->data;
+ int ret = 0;
+
+ while (p + 2 <= priv->sdd->data + priv->sdd->size) {
+ if (p + p[1] + 2 > priv->sdd->data + priv->sdd->size) {
+ pr_warn("Malformed sdd structure\n");
+ return -1;
+ }
+ switch (p[0]) {
+ case SDD_PTA_CFG_ELT_ID: {
+ u16 v;
+ if (p[1] < 4) {
+ pr_warn("SDD_PTA_CFG_ELT_ID malformed\n");
+ ret = -1;
+ break;
+ }
+ v = le16_to_cpu(*((__le16 *)(p + 2)));
+ if (!v) /* non-zero means this is enabled */
+ break;
+
+ v = le16_to_cpu(*((__le16 *)(p + 4)));
+ priv->conf_listen_interval = (v >> 7) & 0x1F;
+ pr_debug("PTA found; Listen Interval %d\n",
+ priv->conf_listen_interval);
+ break;
+ }
+ case SDD_REFERENCE_FREQUENCY_ELT_ID: {
+ u16 clk = le16_to_cpu(*((__le16 *)(p + 2)));
+ if (clk != priv->hw_refclk)
+ pr_warn("SDD file doesn't match configured refclk (%d vs %d)\n",
+ clk, priv->hw_refclk);
+ break;
+ }
+ default:
+ break;
+ }
+ p += p[1] + 2;
+ }
+
+ if (!priv->bt_present) {
+ pr_debug("PTA element NOT found.\n");
+ priv->conf_listen_interval = 0;
+ }
+ return ret;
+}
+
+int cw1200_setup_mac(struct cw1200_common *priv)
+{
+ int ret = 0;
+
+ /* NOTE: There is a bug in the FW: it reports the signal
+ * as RSSI if RSSI subscription is enabled.
+ * It's not enough to set WSM_RCPI_RSSI_USE_RSSI.
+ *
+ * NOTE2: RSSI-based reports have been switched to RCPI, since
+ * the FW has a bug and the reported RSSI values are not stable,
+ * which can lead to signal-level oscillations in user applications.
+ */
+ struct wsm_rcpi_rssi_threshold threshold = {
+ .rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE |
+ WSM_RCPI_RSSI_DONT_USE_UPPER |
+ WSM_RCPI_RSSI_DONT_USE_LOWER,
+ .rollingAverageCount = 16,
+ };
+
+ struct wsm_configuration cfg = {
+ .dot11StationId = &priv->mac_addr[0],
+ };
+
+ /* Remember the decision here to make sure we handle
+ * the RCPI/RSSI value correctly on WSM_EVENT_RCPI_RSSI.
+ */
+ if (threshold.rssiRcpiMode & WSM_RCPI_RSSI_USE_RSSI)
+ priv->cqm_use_rssi = true;
+
+ if (!priv->sdd) {
+ ret = request_firmware(&priv->sdd, priv->sdd_path, priv->pdev);
+ if (ret) {
+ pr_err("Can't load sdd file %s.\n", priv->sdd_path);
+ return ret;
+ }
+ cw1200_parse_sdd_file(priv);
+ }
+
+ cfg.dpdData = priv->sdd->data;
+ cfg.dpdData_size = priv->sdd->size;
+ ret = wsm_configuration(priv, &cfg);
+ if (ret)
+ return ret;
+
+ /* Configure RSSI/RCPI reporting as RSSI. */
+ wsm_set_rcpi_rssi_threshold(priv, &threshold);
+
+ return 0;
+}
+
+static void cw1200_join_complete(struct cw1200_common *priv)
+{
+ pr_debug("[STA] Join complete (%d)\n", priv->join_complete_status);
+
+ priv->join_pending = false;
+ if (priv->join_complete_status) {
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ cw1200_update_listening(priv, priv->listening);
+ cw1200_do_unjoin(priv);
+ ieee80211_connection_loss(priv->vif);
+ } else {
+ if (priv->mode == NL80211_IFTYPE_ADHOC)
+ priv->join_status = CW1200_JOIN_STATUS_IBSS;
+ else
+ priv->join_status = CW1200_JOIN_STATUS_PRE_STA;
+ }
+ wsm_unlock_tx(priv); /* Clearing the lock held before do_join() */
+}
+
+void cw1200_join_complete_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, join_complete_work);
+ mutex_lock(&priv->conf_mutex);
+ cw1200_join_complete(priv);
+ mutex_unlock(&priv->conf_mutex);
+}
+
+void cw1200_join_complete_cb(struct cw1200_common *priv,
+ struct wsm_join_complete *arg)
+{
+ pr_debug("[STA] cw1200_join_complete_cb called, status=%d.\n",
+ arg->status);
+
+ if (cancel_delayed_work(&priv->join_timeout)) {
+ priv->join_complete_status = arg->status;
+ queue_work(priv->workqueue, &priv->join_complete_work);
+ }
+}
+
+/* MUST be called with tx_lock held! It will be unlocked for us. */
+static void cw1200_do_join(struct cw1200_common *priv)
+{
+ const u8 *bssid;
+ struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
+ struct cfg80211_bss *bss = NULL;
+ struct wsm_protected_mgmt_policy mgmt_policy;
+ struct wsm_join join = {
+ .mode = conf->ibss_joined ?
+ WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS,
+ .preamble_type = WSM_JOIN_PREAMBLE_LONG,
+ .probe_for_join = 1,
+ .atim_window = 0,
+ .basic_rate_set = cw1200_rate_mask_to_wsm(priv,
+ conf->basic_rates),
+ };
+ if (delayed_work_pending(&priv->join_timeout)) {
+ pr_warn("[STA] - Join request already pending, skipping..\n");
+ wsm_unlock_tx(priv);
+ return;
+ }
+
+ if (priv->join_status)
+ cw1200_do_unjoin(priv);
+
+ bssid = priv->vif->bss_conf.bssid;
+
+ bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel,
+ bssid, NULL, 0, 0, 0);
+
+ if (!bss && !conf->ibss_joined) {
+ wsm_unlock_tx(priv);
+ return;
+ }
+
+ mutex_lock(&priv->conf_mutex);
+
+ /* Under the conf lock: check scan status and
+ * bail out if it is in progress.
+ */
+ if (atomic_read(&priv->scan.in_progress)) {
+ wsm_unlock_tx(priv);
+ goto done_put;
+ }
+
+ priv->join_pending = true;
+
+ /* Sanity check basic rates */
+ if (!join.basic_rate_set)
+ join.basic_rate_set = 7;
+
+ /* Sanity check beacon interval */
+ if (!priv->beacon_int)
+ priv->beacon_int = 1;
+
+ join.beacon_interval = priv->beacon_int;
+
+ /* BT Coex related changes */
+ if (priv->bt_present) {
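+		/* Round the configured coex listen interval (apparently
+		 * expressed in units of 100 TU) up to a whole number of
+		 * beacon intervals.
+		 */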
+ if (((priv->conf_listen_interval * 100) %
+ priv->beacon_int) == 0)
+ priv->listen_interval =
+ ((priv->conf_listen_interval * 100) /
+ priv->beacon_int);
+ else
+ priv->listen_interval =
+ ((priv->conf_listen_interval * 100) /
+ priv->beacon_int + 1);
+ }
+
+ if (priv->hw->conf.ps_dtim_period)
+ priv->join_dtim_period = priv->hw->conf.ps_dtim_period;
+ join.dtim_period = priv->join_dtim_period;
+
+ join.channel_number = priv->channel->hw_value;
+ join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
+
+ memcpy(join.bssid, bssid, sizeof(join.bssid));
+
+ pr_debug("[STA] Join BSSID: %pM DTIM: %d, interval: %d\n",
+ join.bssid,
+ join.dtim_period, priv->beacon_int);
+
+ if (!conf->ibss_joined) {
+ const u8 *ssidie;
+ rcu_read_lock();
+ ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (ssidie) {
+ join.ssid_len = ssidie[1];
+ memcpy(join.ssid, &ssidie[2], join.ssid_len);
+ }
+ rcu_read_unlock();
+ }
+
+ if (priv->vif->p2p) {
+ join.flags |= WSM_JOIN_FLAGS_P2P_GO;
+ join.basic_rate_set =
+ cw1200_rate_mask_to_wsm(priv, 0xFF0);
+ }
+
+ /* Enable asynchronous join calls */
+ if (!conf->ibss_joined) {
+ join.flags |= WSM_JOIN_FLAGS_FORCE;
+ join.flags |= WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND;
+ }
+
+ wsm_flush_tx(priv);
+
+ /* Stay Awake for Join and Auth Timeouts and a bit more */
+ cw1200_pm_stay_awake(&priv->pm_state,
+ CW1200_JOIN_TIMEOUT + CW1200_AUTH_TIMEOUT);
+
+ cw1200_update_listening(priv, false);
+
+ /* Turn on Block ACKs */
+ wsm_set_block_ack_policy(priv, priv->ba_tx_tid_mask,
+ priv->ba_rx_tid_mask);
+
+ /* Set up timeout */
+ if (join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND) {
+ priv->join_status = CW1200_JOIN_STATUS_JOINING;
+ queue_delayed_work(priv->workqueue,
+ &priv->join_timeout,
+ CW1200_JOIN_TIMEOUT);
+ }
+
+ /* 802.11w protected mgmt frames */
+ mgmt_policy.protectedMgmtEnable = 0;
+ mgmt_policy.unprotectedMgmtFramesAllowed = 1;
+ mgmt_policy.encryptionForAuthFrame = 1;
+ wsm_set_protected_mgmt_policy(priv, &mgmt_policy);
+
+ /* Perform actual join */
+ if (wsm_join(priv, &join)) {
+ pr_err("[STA] cw1200_join_work: wsm_join failed!\n");
+ cancel_delayed_work_sync(&priv->join_timeout);
+ cw1200_update_listening(priv, priv->listening);
+ /* Tx lock still held, unjoin will clear it. */
+ if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ } else {
+ if (!(join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND))
+ cw1200_join_complete(priv); /* Will clear tx_lock */
+
+ /* Upload keys */
+ cw1200_upload_keys(priv);
+
+		/* Due to beacon filtering it is possible that the
+		 * AP's beacon is not known to the mac80211 stack.
+		 * Disable filtering temporarily to make sure the stack
+		 * receives at least one beacon.
+		 */
+ priv->disable_beacon_filter = true;
+ }
+ cw1200_update_filtering(priv);
+
+done_put:
+ mutex_unlock(&priv->conf_mutex);
+ if (bss)
+ cfg80211_put_bss(priv->hw->wiphy, bss);
+}
+
+void cw1200_join_timeout(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, join_timeout.work);
+ pr_debug("[WSM] Join timed out.\n");
+ wsm_lock_tx(priv);
+ if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+}
+
+static void cw1200_do_unjoin(struct cw1200_common *priv)
+{
+ struct wsm_reset reset = {
+ .reset_statistics = true,
+ };
+
+ cancel_delayed_work_sync(&priv->join_timeout);
+
+ mutex_lock(&priv->conf_mutex);
+ priv->join_pending = false;
+
+ if (atomic_read(&priv->scan.in_progress)) {
+ if (priv->delayed_unjoin)
+ wiphy_dbg(priv->hw->wiphy, "Delayed unjoin is already scheduled.\n");
+ else
+ priv->delayed_unjoin = true;
+ goto done;
+ }
+
+ priv->delayed_link_loss = false;
+
+ if (!priv->join_status)
+ goto done;
+
+ if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
+ wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
+ priv->join_status);
+ BUG_ON(1);
+ }
+
+ cancel_work_sync(&priv->update_filtering_work);
+ cancel_work_sync(&priv->set_beacon_wakeup_period_work);
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+
+ /* Unjoin is a reset. */
+ wsm_flush_tx(priv);
+ wsm_keep_alive_period(priv, 0);
+ wsm_reset(priv, &reset);
+ wsm_set_output_power(priv, priv->output_power * 10);
+ priv->join_dtim_period = 0;
+ cw1200_setup_mac(priv);
+ cw1200_free_event_queue(priv);
+ cancel_work_sync(&priv->event_handler);
+ cw1200_update_listening(priv, priv->listening);
+ cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+
+ /* Disable Block ACKs */
+ wsm_set_block_ack_policy(priv, 0, 0);
+
+ priv->disable_beacon_filter = false;
+ cw1200_update_filtering(priv);
+ memset(&priv->association_mode, 0,
+ sizeof(priv->association_mode));
+ memset(&priv->bss_params, 0, sizeof(priv->bss_params));
+ priv->setbssparams_done = false;
+ memset(&priv->firmware_ps_mode, 0,
+ sizeof(priv->firmware_ps_mode));
+
+ pr_debug("[STA] Unjoin completed.\n");
+
+done:
+ mutex_unlock(&priv->conf_mutex);
+}
+
+void cw1200_unjoin_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, unjoin_work);
+
+ cw1200_do_unjoin(priv);
+
+ /* Tell the stack we're dead */
+ ieee80211_connection_loss(priv->vif);
+
+ wsm_unlock_tx(priv);
+}
+
+int cw1200_enable_listening(struct cw1200_common *priv)
+{
+ struct wsm_start start = {
+ .mode = WSM_START_MODE_P2P_DEV,
+ .band = WSM_PHY_BAND_2_4G,
+ .beacon_interval = 100,
+ .dtim_period = 1,
+ .probe_delay = 0,
+ .basic_rate_set = 0x0F,
+ };
+
+ if (priv->channel) {
+ start.band = priv->channel->band == IEEE80211_BAND_5GHZ ?
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
+ start.channel_number = priv->channel->hw_value;
+ } else {
+ start.band = WSM_PHY_BAND_2_4G;
+ start.channel_number = 1;
+ }
+
+ return wsm_start(priv, &start);
+}
+
+int cw1200_disable_listening(struct cw1200_common *priv)
+{
+ int ret;
+ struct wsm_reset reset = {
+ .reset_statistics = true,
+ };
+ ret = wsm_reset(priv, &reset);
+ return ret;
+}
+
+void cw1200_update_listening(struct cw1200_common *priv, bool enabled)
+{
+ if (enabled) {
+ if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) {
+ if (!cw1200_enable_listening(priv))
+ priv->join_status = CW1200_JOIN_STATUS_MONITOR;
+ wsm_set_probe_responder(priv, true);
+ }
+ } else {
+ if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
+ if (!cw1200_disable_listening(priv))
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ wsm_set_probe_responder(priv, false);
+ }
+ }
+}
+
+int cw1200_set_uapsd_param(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg)
+{
+ int ret;
+ u16 uapsd_flags = 0;
+
+ /* Here's the mapping AC [queue, bit]
+ * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0]
+ */
+
+ if (arg->uapsd_enable[0])
+ uapsd_flags |= 1 << 3;
+
+ if (arg->uapsd_enable[1])
+ uapsd_flags |= 1 << 2;
+
+ if (arg->uapsd_enable[2])
+ uapsd_flags |= 1 << 1;
+
+ if (arg->uapsd_enable[3])
+ uapsd_flags |= 1;
+
+ /* Currently pseudo U-APSD operation is not supported, so setting
+ * MinAutoTriggerInterval, MaxAutoTriggerInterval and
+ * AutoTriggerStep to 0
+ */
+
+ priv->uapsd_info.uapsd_flags = cpu_to_le16(uapsd_flags);
+ priv->uapsd_info.min_auto_trigger_interval = 0;
+ priv->uapsd_info.max_auto_trigger_interval = 0;
+ priv->uapsd_info.auto_trigger_step = 0;
+
+ ret = wsm_set_uapsd_info(priv, &priv->uapsd_info);
+ return ret;
+}
+
+/* ******************************************************************** */
+/* AP API */
+
+int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_sta_priv *sta_priv =
+ (struct cw1200_sta_priv *)&sta->drv_priv;
+ struct cw1200_link_entry *entry;
+ struct sk_buff *skb;
+
+ if (priv->mode != NL80211_IFTYPE_AP)
+ return 0;
+
+ sta_priv->link_id = cw1200_find_link_id(priv, sta->addr);
+ if (WARN_ON(!sta_priv->link_id)) {
+ wiphy_info(priv->hw->wiphy,
+ "[AP] No more link IDs available.\n");
+ return -ENOENT;
+ }
+
+ entry = &priv->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&priv->ps_state_lock);
+ if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ priv->sta_asleep_mask |= BIT(sta_priv->link_id);
+ entry->status = CW1200_LINK_HARD;
+ while ((skb = skb_dequeue(&entry->rx_queue)))
+ ieee80211_rx_irqsafe(priv->hw, skb);
+ spin_unlock_bh(&priv->ps_state_lock);
+ return 0;
+}
+
+int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_sta_priv *sta_priv =
+ (struct cw1200_sta_priv *)&sta->drv_priv;
+ struct cw1200_link_entry *entry;
+
+ if (priv->mode != NL80211_IFTYPE_AP || !sta_priv->link_id)
+ return 0;
+
+ entry = &priv->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&priv->ps_state_lock);
+ entry->status = CW1200_LINK_RESERVE;
+ entry->timestamp = jiffies;
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
+ wsm_unlock_tx(priv);
+ spin_unlock_bh(&priv->ps_state_lock);
+ flush_workqueue(priv->workqueue);
+ return 0;
+}
+
+static void __cw1200_sta_notify(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd,
+ int link_id)
+{
+ struct cw1200_common *priv = dev->priv;
+ u32 bit, prev;
+
+ /* Zero link id means "for all link IDs" */
+ if (link_id)
+ bit = BIT(link_id);
+ else if (WARN_ON_ONCE(notify_cmd != STA_NOTIFY_AWAKE))
+ bit = 0;
+ else
+ bit = priv->link_id_map;
+ prev = priv->sta_asleep_mask & bit;
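+	/* 'prev' is non-zero if any of the addressed link IDs was
+	 * already marked asleep.
+	 */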
+
+ switch (notify_cmd) {
+ case STA_NOTIFY_SLEEP:
+ if (!prev) {
+ if (priv->buffered_multicasts &&
+ !priv->sta_asleep_mask)
+ queue_work(priv->workqueue,
+ &priv->multicast_start_work);
+ priv->sta_asleep_mask |= bit;
+ }
+ break;
+ case STA_NOTIFY_AWAKE:
+ if (prev) {
+ priv->sta_asleep_mask &= ~bit;
+ priv->pspoll_mask &= ~bit;
+ if (priv->tx_multicast && link_id &&
+ !priv->sta_asleep_mask)
+ queue_work(priv->workqueue,
+ &priv->multicast_stop_work);
+ cw1200_bh_wakeup(priv);
+ }
+ break;
+ }
+}
+
+void cw1200_sta_notify(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd,
+ struct ieee80211_sta *sta)
+{
+ struct cw1200_common *priv = dev->priv;
+ struct cw1200_sta_priv *sta_priv =
+ (struct cw1200_sta_priv *)&sta->drv_priv;
+
+ spin_lock_bh(&priv->ps_state_lock);
+ __cw1200_sta_notify(dev, vif, notify_cmd, sta_priv->link_id);
+ spin_unlock_bh(&priv->ps_state_lock);
+}
+
+static void cw1200_ps_notify(struct cw1200_common *priv,
+ int link_id, bool ps)
+{
+ if (link_id > CW1200_MAX_STA_IN_AP_MODE)
+ return;
+
+ pr_debug("%s for LinkId: %d. STAs asleep: %.8X\n",
+ ps ? "Stop" : "Start",
+ link_id, priv->sta_asleep_mask);
+
+ __cw1200_sta_notify(priv->hw, priv->vif,
+ ps ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, link_id);
+}
+
+static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set)
+{
+ struct sk_buff *skb;
+ struct wsm_update_ie update_ie = {
+ .what = WSM_UPDATE_IE_BEACON,
+ .count = 1,
+ };
+ u16 tim_offset, tim_length;
+
+ pr_debug("[AP] mcast: %s.\n", aid0_bit_set ? "ena" : "dis");
+
+ skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
+ &tim_offset, &tim_length);
+ if (!skb) {
+ if (!__cw1200_flush(priv, true))
+ wsm_unlock_tx(priv);
+ return -ENOENT;
+ }
+
+ if (tim_offset && tim_length >= 6) {
+ /* Ignore DTIM count from mac80211:
+ * firmware handles DTIM internally.
+ */
+ skb->data[tim_offset + 2] = 0;
+
+ /* Set/reset aid0 bit */
+ if (aid0_bit_set)
+ skb->data[tim_offset + 4] |= 1;
+ else
+ skb->data[tim_offset + 4] &= ~1;
+ }
+
+ update_ie.ies = &skb->data[tim_offset];
+ update_ie.length = tim_length;
+ wsm_update_ie(priv, &update_ie);
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+void cw1200_set_tim_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, set_tim_work);
+ (void)cw1200_set_tim_impl(priv, priv->aid0_bit_set);
+}
+
+int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct cw1200_common *priv = dev->priv;
+ queue_work(priv->workqueue, &priv->set_tim_work);
+ return 0;
+}
+
+void cw1200_set_cts_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, set_cts_work);
+
+ u8 erp_ie[3] = {WLAN_EID_ERP_INFO, 0x1, 0};
+ struct wsm_update_ie update_ie = {
+ .what = WSM_UPDATE_IE_BEACON,
+ .count = 1,
+ .ies = erp_ie,
+ .length = 3,
+ };
+ u32 erp_info;
+ __le32 use_cts_prot;
+ mutex_lock(&priv->conf_mutex);
+ erp_info = priv->erp_info;
+ mutex_unlock(&priv->conf_mutex);
+ use_cts_prot =
+ erp_info & WLAN_ERP_USE_PROTECTION ?
+ __cpu_to_le32(1) : 0;
+
+ erp_ie[ERP_INFO_BYTE_OFFSET] = erp_info;
+
+ pr_debug("[STA] ERP information 0x%x\n", erp_info);
+
+ wsm_write_mib(priv, WSM_MIB_ID_NON_ERP_PROTECTION,
+ &use_cts_prot, sizeof(use_cts_prot));
+ wsm_update_ie(priv, &update_ie);
+
+ return;
+}
+
+static int cw1200_set_btcoexinfo(struct cw1200_common *priv)
+{
+ struct wsm_override_internal_txrate arg;
+ int ret = 0;
+
+ if (priv->mode == NL80211_IFTYPE_STATION) {
+ /* Plumb PSPOLL and NULL template */
+ cw1200_upload_pspoll(priv);
+ cw1200_upload_null(priv);
+ cw1200_upload_qosnull(priv);
+ } else {
+ return 0;
+ }
+
+ memset(&arg, 0, sizeof(struct wsm_override_internal_txrate));
+
+ if (!priv->vif->p2p) {
+ /* STATION mode */
+ if (priv->bss_params.operational_rate_set & ~0xF) {
+ pr_debug("[STA] STA has ERP rates\n");
+ /* G or BG mode */
+ arg.internalTxRate = (__ffs(
+ priv->bss_params.operational_rate_set & ~0xF));
+ } else {
+ pr_debug("[STA] STA has non ERP rates\n");
+ /* B only mode */
+ arg.internalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set)));
+ }
+ arg.nonErpInternalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set)));
+ } else {
+ /* P2P mode */
+ arg.internalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF));
+ arg.nonErpInternalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF));
+ }
+
+ pr_debug("[STA] BTCOEX_INFO MODE %d, internalTxRate : %x, nonErpInternalTxRate: %x\n",
+ priv->mode,
+ arg.internalTxRate,
+ arg.nonErpInternalTxRate);
+
+ ret = wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
+ &arg, sizeof(arg));
+
+ return ret;
+}
+
+void cw1200_bss_info_changed(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct cw1200_common *priv = dev->priv;
+ bool do_join = false;
+
+ mutex_lock(&priv->conf_mutex);
+
+ pr_debug("BSS CHANGED: %08x\n", changed);
+
+ /* TODO: BSS_CHANGED_QOS */
+ /* TODO: BSS_CHANGED_TXPOWER */
+
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ struct wsm_mib_arp_ipv4_filter filter = {0};
+ int i;
+
+ pr_debug("[STA] BSS_CHANGED_ARP_FILTER cnt: %d\n",
+ info->arp_addr_cnt);
+
+		/* Currently only one IP address is supported by firmware.
+		 * If more IPs are configured, ARP filtering will be disabled.
+		 */
+ if (info->arp_addr_cnt > 0 &&
+ info->arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) {
+ for (i = 0; i < info->arp_addr_cnt; i++) {
+ filter.ipv4addrs[i] = info->arp_addr_list[i];
+ pr_debug("[STA] addr[%d]: 0x%X\n",
+ i, filter.ipv4addrs[i]);
+ }
+ filter.enable = __cpu_to_le32(1);
+ }
+
+ pr_debug("[STA] arp ip filter enable: %d\n",
+ __le32_to_cpu(filter.enable));
+
+ wsm_set_arp_ipv4_filter(priv, &filter);
+ }
+
+ if (changed &
+ (BSS_CHANGED_BEACON |
+ BSS_CHANGED_AP_PROBE_RESP |
+ BSS_CHANGED_BSSID |
+ BSS_CHANGED_SSID |
+ BSS_CHANGED_IBSS)) {
+ pr_debug("BSS_CHANGED_BEACON\n");
+ priv->beacon_int = info->beacon_int;
+ cw1200_update_beaconing(priv);
+ cw1200_upload_beacon(priv);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ pr_debug("BSS_CHANGED_BEACON_ENABLED (%d)\n", info->enable_beacon);
+
+ if (priv->enable_beacon != info->enable_beacon) {
+ cw1200_enable_beaconing(priv, info->enable_beacon);
+ priv->enable_beacon = info->enable_beacon;
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ pr_debug("CHANGED_BEACON_INT\n");
+ if (info->ibss_joined)
+ do_join = true;
+ else if (priv->join_status == CW1200_JOIN_STATUS_AP)
+ cw1200_update_beaconing(priv);
+ }
+
+ /* assoc/disassoc, or maybe AID changed */
+ if (changed & BSS_CHANGED_ASSOC) {
+ wsm_lock_tx(priv);
+ priv->wep_default_key_id = -1;
+ wsm_unlock_tx(priv);
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ pr_debug("BSS_CHANGED_BSSID\n");
+ do_join = true;
+ }
+
+ if (changed &
+ (BSS_CHANGED_ASSOC |
+ BSS_CHANGED_BSSID |
+ BSS_CHANGED_IBSS |
+ BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_HT)) {
+ pr_debug("BSS_CHANGED_ASSOC\n");
+ if (info->assoc) {
+ if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) {
+ ieee80211_connection_loss(vif);
+ mutex_unlock(&priv->conf_mutex);
+ return;
+ } else if (priv->join_status == CW1200_JOIN_STATUS_PRE_STA) {
+ priv->join_status = CW1200_JOIN_STATUS_STA;
+ }
+ } else {
+ do_join = true;
+ }
+
+ if (info->assoc || info->ibss_joined) {
+ struct ieee80211_sta *sta = NULL;
+ __le32 htprot = 0;
+
+ if (info->dtim_period)
+ priv->join_dtim_period = info->dtim_period;
+ priv->beacon_int = info->beacon_int;
+
+ rcu_read_lock();
+
+ if (info->bssid && !info->ibss_joined)
+ sta = ieee80211_find_sta(vif, info->bssid);
+ if (sta) {
+ priv->ht_info.ht_cap = sta->ht_cap;
+ priv->bss_params.operational_rate_set =
+ cw1200_rate_mask_to_wsm(priv,
+ sta->supp_rates[priv->channel->band]);
+ priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef);
+ priv->ht_info.operation_mode = info->ht_operation_mode;
+ } else {
+ memset(&priv->ht_info, 0,
+ sizeof(priv->ht_info));
+ priv->bss_params.operational_rate_set = -1;
+ }
+ rcu_read_unlock();
+
+ /* Non Greenfield stations present */
+ if (priv->ht_info.operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
+ htprot |= cpu_to_le32(WSM_NON_GREENFIELD_STA_PRESENT);
+
+ /* Set HT protection method */
+ htprot |= cpu_to_le32((priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION) << 2);
+
+ /* TODO:
+ * STBC_param.dual_cts
+ * STBC_param.LSIG_TXOP_FILL
+ */
+
+ wsm_write_mib(priv, WSM_MIB_ID_SET_HT_PROTECTION,
+ &htprot, sizeof(htprot));
+
+ priv->association_mode.greenfield =
+ cw1200_ht_greenfield(&priv->ht_info);
+ priv->association_mode.flags =
+ WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES |
+ WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE |
+ WSM_ASSOCIATION_MODE_USE_HT_MODE |
+ WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET |
+ WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING;
+ priv->association_mode.preamble =
+ info->use_short_preamble ?
+ WSM_JOIN_PREAMBLE_SHORT :
+ WSM_JOIN_PREAMBLE_LONG;
+ priv->association_mode.basic_rate_set = __cpu_to_le32(
+ cw1200_rate_mask_to_wsm(priv,
+ info->basic_rates));
+ priv->association_mode.mpdu_start_spacing =
+ cw1200_ht_ampdu_density(&priv->ht_info);
+
+ cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+ cancel_work_sync(&priv->unjoin_work);
+
+ priv->bss_params.beacon_lost_count = priv->cqm_beacon_loss_count;
+ priv->bss_params.aid = info->aid;
+
+ if (priv->join_dtim_period < 1)
+ priv->join_dtim_period = 1;
+
+ pr_debug("[STA] DTIM %d, interval: %d\n",
+ priv->join_dtim_period, priv->beacon_int);
+ pr_debug("[STA] Preamble: %d, Greenfield: %d, Aid: %d, Rates: 0x%.8X, Basic: 0x%.8X\n",
+ priv->association_mode.preamble,
+ priv->association_mode.greenfield,
+ priv->bss_params.aid,
+ priv->bss_params.operational_rate_set,
+ priv->association_mode.basic_rate_set);
+ wsm_set_association_mode(priv, &priv->association_mode);
+
+ if (!info->ibss_joined) {
+ wsm_keep_alive_period(priv, 30 /* sec */);
+ wsm_set_bss_params(priv, &priv->bss_params);
+ priv->setbssparams_done = true;
+ cw1200_set_beacon_wakeup_period_work(&priv->set_beacon_wakeup_period_work);
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ }
+ if (priv->vif->p2p) {
+ pr_debug("[STA] Setting p2p powersave configuration.\n");
+ wsm_set_p2p_ps_modeinfo(priv,
+ &priv->p2p_ps_modeinfo);
+ }
+ if (priv->bt_present)
+ cw1200_set_btcoexinfo(priv);
+ } else {
+ memset(&priv->association_mode, 0,
+ sizeof(priv->association_mode));
+ memset(&priv->bss_params, 0, sizeof(priv->bss_params));
+ }
+ }
+
+ /* ERP Protection */
+ if (changed & (BSS_CHANGED_ASSOC |
+ BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_ERP_PREAMBLE)) {
+ u32 prev_erp_info = priv->erp_info;
+ if (info->use_cts_prot)
+ priv->erp_info |= WLAN_ERP_USE_PROTECTION;
+ else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
+ priv->erp_info &= ~WLAN_ERP_USE_PROTECTION;
+
+ if (info->use_short_preamble)
+ priv->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
+ else
+ priv->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
+
+ pr_debug("[STA] ERP Protection: %x\n", priv->erp_info);
+
+ if (prev_erp_info != priv->erp_info)
+ queue_work(priv->workqueue, &priv->set_cts_work);
+ }
+
+ /* ERP Slottime */
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT)) {
+ __le32 slot_time = info->use_short_slot ?
+ __cpu_to_le32(9) : __cpu_to_le32(20);
+ pr_debug("[STA] Slot time: %d us.\n",
+ __le32_to_cpu(slot_time));
+ wsm_write_mib(priv, WSM_MIB_ID_DOT11_SLOT_TIME,
+ &slot_time, sizeof(slot_time));
+ }
+
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
+ struct wsm_rcpi_rssi_threshold threshold = {
+ .rollingAverageCount = 8,
+ };
+ pr_debug("[CQM] RSSI threshold subscribe: %d +- %d\n",
+ info->cqm_rssi_thold, info->cqm_rssi_hyst);
+ priv->cqm_rssi_thold = info->cqm_rssi_thold;
+ priv->cqm_rssi_hyst = info->cqm_rssi_hyst;
+
+ if (info->cqm_rssi_thold || info->cqm_rssi_hyst) {
+ /* RSSI subscription enabled */
+			/* TODO: This is not the correct way of setting the
+			 * threshold. Upper and lower must be set equal here
+			 * and adjusted in the callback. However, the current
+			 * implementation is much more reliable and stable.
+ */
+
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110
+ */
+ if (priv->cqm_use_rssi) {
+ threshold.upperThreshold =
+ info->cqm_rssi_thold + info->cqm_rssi_hyst;
+ threshold.lowerThreshold =
+ info->cqm_rssi_thold;
+ threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI;
+ } else {
+ threshold.upperThreshold = (info->cqm_rssi_thold + info->cqm_rssi_hyst + 110) * 2;
+ threshold.lowerThreshold = (info->cqm_rssi_thold + 110) * 2;
+ }
+ threshold.rssiRcpiMode |= WSM_RCPI_RSSI_THRESHOLD_ENABLE;
+ } else {
+			/* There is a bug in FW (see the note in
+			 * cw1200_setup_mac()): we have to enable a dummy
+			 * subscription to get correct RSSI values.
+			 */
+ threshold.rssiRcpiMode |=
+ WSM_RCPI_RSSI_THRESHOLD_ENABLE |
+ WSM_RCPI_RSSI_DONT_USE_UPPER |
+ WSM_RCPI_RSSI_DONT_USE_LOWER;
+ if (priv->cqm_use_rssi)
+ threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI;
+ }
+ wsm_set_rcpi_rssi_threshold(priv, &threshold);
+ }
+ mutex_unlock(&priv->conf_mutex);
+
+ if (do_join) {
+ wsm_lock_tx(priv);
+ cw1200_do_join(priv); /* Will unlock it for us */
+ }
+}
+
+void cw1200_multicast_start_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, multicast_start_work);
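+	/* Roughly one DTIM interval (beacon period in TU plus ~20 TU of
+	 * slack), converted to jiffies.
+	 */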
+ long tmo = priv->join_dtim_period *
+ (priv->beacon_int + 20) * HZ / 1024;
+
+ cancel_work_sync(&priv->multicast_stop_work);
+
+ if (!priv->aid0_bit_set) {
+ wsm_lock_tx(priv);
+ cw1200_set_tim_impl(priv, true);
+ priv->aid0_bit_set = true;
+ mod_timer(&priv->mcast_timeout, jiffies + tmo);
+ wsm_unlock_tx(priv);
+ }
+}
+
+void cw1200_multicast_stop_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, multicast_stop_work);
+
+ if (priv->aid0_bit_set) {
+ del_timer_sync(&priv->mcast_timeout);
+ wsm_lock_tx(priv);
+ priv->aid0_bit_set = false;
+ cw1200_set_tim_impl(priv, false);
+ wsm_unlock_tx(priv);
+ }
+}
+
+void cw1200_mcast_timeout(unsigned long arg)
+{
+ struct cw1200_common *priv =
+ (struct cw1200_common *)arg;
+
+ wiphy_warn(priv->hw->wiphy,
+ "Multicast delivery timeout.\n");
+ spin_lock_bh(&priv->ps_state_lock);
+ priv->tx_multicast = priv->aid0_bit_set &&
+ priv->buffered_multicasts;
+ if (priv->tx_multicast)
+ cw1200_bh_wakeup(priv);
+ spin_unlock_bh(&priv->ps_state_lock);
+}
+
+int cw1200_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ /* Aggregation is implemented fully in firmware,
+ * including block ack negotiation. Do not allow
+	 * the mac80211 stack to do anything: it interferes with
+ * the firmware.
+ */
+
+ /* Note that we still need this function stubbed. */
+ return -ENOTSUPP;
+}
+
+/* ******************************************************************** */
+/* WSM callback */
+void cw1200_suspend_resume(struct cw1200_common *priv,
+ struct wsm_suspend_resume *arg)
+{
+ pr_debug("[AP] %s: %s\n",
+ arg->stop ? "stop" : "start",
+ arg->multicast ? "broadcast" : "unicast");
+
+ if (arg->multicast) {
+ bool cancel_tmo = false;
+ spin_lock_bh(&priv->ps_state_lock);
+ if (arg->stop) {
+ priv->tx_multicast = false;
+ } else {
+			/* Firmware sends this indication every DTIM if there
+			 * is a connected STA in powersave. There is no reason
+			 * to suspend; the subsequent wakeup would consume much
+			 * more power than could be saved.
+ */
+ cw1200_pm_stay_awake(&priv->pm_state,
+ priv->join_dtim_period *
+ (priv->beacon_int + 20) * HZ / 1024);
+ priv->tx_multicast = (priv->aid0_bit_set &&
+ priv->buffered_multicasts);
+ if (priv->tx_multicast) {
+ cancel_tmo = true;
+ cw1200_bh_wakeup(priv);
+ }
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+ if (cancel_tmo)
+ del_timer_sync(&priv->mcast_timeout);
+ } else {
+ spin_lock_bh(&priv->ps_state_lock);
+ cw1200_ps_notify(priv, arg->link_id, arg->stop);
+ spin_unlock_bh(&priv->ps_state_lock);
+ if (!arg->stop)
+ cw1200_bh_wakeup(priv);
+ }
+ return;
+}
+
+/* ******************************************************************** */
+/* AP privates */
+
+static int cw1200_upload_beacon(struct cw1200_common *priv)
+{
+ int ret = 0;
+ struct ieee80211_mgmt *mgmt;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_BEACON,
+ };
+
+ u16 tim_offset;
+ u16 tim_len;
+
+ if (priv->mode == NL80211_IFTYPE_STATION ||
+ priv->mode == NL80211_IFTYPE_MONITOR ||
+ priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ goto done;
+
+ if (priv->vif->p2p)
+ frame.rate = WSM_TRANSMIT_RATE_6;
+
+ frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
+ &tim_offset, &tim_len);
+ if (!frame.skb)
+ return -ENOMEM;
+
+ ret = wsm_set_template_frame(priv, &frame);
+
+ if (ret)
+ goto done;
+
+ /* TODO: Distill probe resp; remove TIM
+ * and any other beacon-specific IEs
+ */
+ mgmt = (void *)frame.skb->data;
+ mgmt->frame_control =
+ __cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+
+ frame.frame_type = WSM_FRAME_TYPE_PROBE_RESPONSE;
+ if (priv->vif->p2p) {
+ ret = wsm_set_probe_responder(priv, true);
+ } else {
+ ret = wsm_set_template_frame(priv, &frame);
+ wsm_set_probe_responder(priv, false);
+ }
+
+done:
+ dev_kfree_skb(frame.skb);
+
+ return ret;
+}
+
+static int cw1200_upload_pspoll(struct cw1200_common *priv)
+{
+ int ret = 0;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_PS_POLL,
+ .rate = 0xFF,
+ };
+
+
+ frame.skb = ieee80211_pspoll_get(priv->hw, priv->vif);
+ if (!frame.skb)
+ return -ENOMEM;
+
+ ret = wsm_set_template_frame(priv, &frame);
+
+ dev_kfree_skb(frame.skb);
+
+ return ret;
+}
+
+static int cw1200_upload_null(struct cw1200_common *priv)
+{
+ int ret = 0;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_NULL,
+ .rate = 0xFF,
+ };
+
+ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ if (!frame.skb)
+ return -ENOMEM;
+
+ ret = wsm_set_template_frame(priv, &frame);
+
+ dev_kfree_skb(frame.skb);
+
+ return ret;
+}
+
+static int cw1200_upload_qosnull(struct cw1200_common *priv)
+{
+ int ret = 0;
+ /* TODO: This needs to be implemented
+
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_QOS_NULL,
+ .rate = 0xFF,
+ };
+
+ frame.skb = ieee80211_qosnullfunc_get(priv->hw, priv->vif);
+ if (!frame.skb)
+ return -ENOMEM;
+
+ ret = wsm_set_template_frame(priv, &frame);
+
+ dev_kfree_skb(frame.skb);
+
+ */
+ return ret;
+}
+
+static int cw1200_enable_beaconing(struct cw1200_common *priv,
+ bool enable)
+{
+ struct wsm_beacon_transmit transmit = {
+ .enable_beaconing = enable,
+ };
+
+ return wsm_beacon_transmit(priv, &transmit);
+}
+
+static int cw1200_start_ap(struct cw1200_common *priv)
+{
+ int ret;
+ struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
+ struct wsm_start start = {
+ .mode = priv->vif->p2p ?
+ WSM_START_MODE_P2P_GO : WSM_START_MODE_AP,
+ .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
+ .channel_number = priv->channel->hw_value,
+ .beacon_interval = conf->beacon_int,
+ .dtim_period = conf->dtim_period,
+ .preamble = conf->use_short_preamble ?
+ WSM_JOIN_PREAMBLE_SHORT :
+ WSM_JOIN_PREAMBLE_LONG,
+ .probe_delay = 100,
+ .basic_rate_set = cw1200_rate_mask_to_wsm(priv,
+ conf->basic_rates),
+ };
+ struct wsm_operational_mode mode = {
+ .power_mode = cw1200_power_mode,
+ .disable_more_flag_usage = true,
+ };
+
+ memset(start.ssid, 0, sizeof(start.ssid));
+ if (!conf->hidden_ssid) {
+ start.ssid_len = conf->ssid_len;
+ memcpy(start.ssid, conf->ssid, start.ssid_len);
+ }
+
+ priv->beacon_int = conf->beacon_int;
+ priv->join_dtim_period = conf->dtim_period;
+
+ memset(&priv->link_id_db, 0, sizeof(priv->link_id_db));
+
+ pr_debug("[AP] ch: %d(%d), bcn: %d(%d), brt: 0x%.8X, ssid: %.*s.\n",
+ start.channel_number, start.band,
+ start.beacon_interval, start.dtim_period,
+ start.basic_rate_set,
+ start.ssid_len, start.ssid);
+ ret = wsm_start(priv, &start);
+ if (!ret)
+ ret = cw1200_upload_keys(priv);
+ if (!ret && priv->vif->p2p) {
+ pr_debug("[AP] Setting p2p powersave configuration.\n");
+ wsm_set_p2p_ps_modeinfo(priv, &priv->p2p_ps_modeinfo);
+ }
+ if (!ret) {
+ wsm_set_block_ack_policy(priv, 0, 0);
+ priv->join_status = CW1200_JOIN_STATUS_AP;
+ cw1200_update_filtering(priv);
+ }
+ wsm_set_operational_mode(priv, &mode);
+ return ret;
+}
+
+static int cw1200_update_beaconing(struct cw1200_common *priv)
+{
+ struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
+ struct wsm_reset reset = {
+ .link_id = 0,
+ .reset_statistics = true,
+ };
+
+ if (priv->mode == NL80211_IFTYPE_AP) {
+ /* TODO: check if changed channel, band */
+ if (priv->join_status != CW1200_JOIN_STATUS_AP ||
+ priv->beacon_int != conf->beacon_int) {
+ pr_debug("ap restarting\n");
+ wsm_lock_tx(priv);
+ if (priv->join_status != CW1200_JOIN_STATUS_PASSIVE)
+ wsm_reset(priv, &reset);
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ cw1200_start_ap(priv);
+ wsm_unlock_tx(priv);
+ } else
+ pr_debug("ap started join_status: %d\n",
+ priv->join_status);
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
new file mode 100644
index 000000000000..35babb62cc6a
--- /dev/null
+++ b/drivers/net/wireless/cw1200/sta.h
@@ -0,0 +1,123 @@
+/*
+ * Mac80211 STA interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef STA_H_INCLUDED
+#define STA_H_INCLUDED
+
+/* ******************************************************************** */
+/* mac80211 API */
+
+int cw1200_start(struct ieee80211_hw *dev);
+void cw1200_stop(struct ieee80211_hw *dev);
+int cw1200_add_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif);
+void cw1200_remove_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif);
+int cw1200_change_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type,
+ bool p2p);
+int cw1200_config(struct ieee80211_hw *dev, u32 changed);
+void cw1200_configure_filter(struct ieee80211_hw *dev,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast);
+int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+int cw1200_get_stats(struct ieee80211_hw *dev,
+ struct ieee80211_low_level_stats *stats);
+int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+
+int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+
+void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+
+u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list);
+
+int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg);
+
+/* ******************************************************************** */
+/* WSM callbacks */
+
+void cw1200_join_complete_cb(struct cw1200_common *priv,
+ struct wsm_join_complete *arg);
+
+/* ******************************************************************** */
+/* WSM events */
+
+void cw1200_free_event_queue(struct cw1200_common *priv);
+void cw1200_event_handler(struct work_struct *work);
+void cw1200_bss_loss_work(struct work_struct *work);
+void cw1200_bss_params_work(struct work_struct *work);
+void cw1200_keep_alive_work(struct work_struct *work);
+void cw1200_tx_failure_work(struct work_struct *work);
+
+void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, int init, int good,
+ int bad);
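+/* Locked wrapper: takes bss_loss_lock around the bss-loss state machine. */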
+static inline void cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
+ int init, int good, int bad)
+{
+ spin_lock(&priv->bss_loss_lock);
+ __cw1200_cqm_bssloss_sm(priv, init, good, bad);
+ spin_unlock(&priv->bss_loss_lock);
+}
+
+/* ******************************************************************** */
+/* Internal API */
+
+int cw1200_setup_mac(struct cw1200_common *priv);
+void cw1200_join_timeout(struct work_struct *work);
+void cw1200_unjoin_work(struct work_struct *work);
+void cw1200_join_complete_work(struct work_struct *work);
+void cw1200_wep_key_work(struct work_struct *work);
+void cw1200_update_listening(struct cw1200_common *priv, bool enabled);
+void cw1200_update_filtering(struct cw1200_common *priv);
+void cw1200_update_filtering_work(struct work_struct *work);
+void cw1200_set_beacon_wakeup_period_work(struct work_struct *work);
+int cw1200_enable_listening(struct cw1200_common *priv);
+int cw1200_disable_listening(struct cw1200_common *priv);
+int cw1200_set_uapsd_param(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg);
+void cw1200_ba_work(struct work_struct *work);
+void cw1200_ba_timer(unsigned long arg);
+
+/* AP stuff */
+int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
+ bool set);
+int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd,
+ struct ieee80211_sta *sta);
+void cw1200_bss_info_changed(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed);
+int cw1200_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
+
+void cw1200_suspend_resume(struct cw1200_common *priv,
+ struct wsm_suspend_resume *arg);
+void cw1200_set_tim_work(struct work_struct *work);
+void cw1200_set_cts_work(struct work_struct *work);
+void cw1200_multicast_start_work(struct work_struct *work);
+void cw1200_multicast_stop_work(struct work_struct *work);
+void cw1200_mcast_timeout(unsigned long arg);
+
+#endif
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
new file mode 100644
index 000000000000..e824d4d4a18d
--- /dev/null
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -0,0 +1,1473 @@
+/*
+ * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "cw1200.h"
+#include "wsm.h"
+#include "bh.h"
+#include "sta.h"
+#include "debug.h"
+
+#define CW1200_INVALID_RATE_ID (0xFF)
+
+static int cw1200_handle_action_rx(struct cw1200_common *priv,
+ struct sk_buff *skb);
+static const struct ieee80211_rate *
+cw1200_get_tx_rate(const struct cw1200_common *priv,
+ const struct ieee80211_tx_rate *rate);
+
+/* ******************************************************************** */
+/* TX queue lock / unlock */
+
+static inline void cw1200_tx_queues_lock(struct cw1200_common *priv)
+{
+ int i;
+ for (i = 0; i < 4; ++i)
+ cw1200_queue_lock(&priv->tx_queue[i]);
+}
+
+static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv)
+{
+ int i;
+ for (i = 0; i < 4; ++i)
+ cw1200_queue_unlock(&priv->tx_queue[i]);
+}
+
+/* ******************************************************************** */
+/* TX policy cache implementation */
+
+static void tx_policy_dump(struct tx_policy *policy)
+{
+ pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n",
+ policy->raw[0] & 0x0F, policy->raw[0] >> 4,
+ policy->raw[1] & 0x0F, policy->raw[1] >> 4,
+ policy->raw[2] & 0x0F, policy->raw[2] >> 4,
+ policy->raw[3] & 0x0F, policy->raw[3] >> 4,
+ policy->raw[4] & 0x0F, policy->raw[4] >> 4,
+ policy->raw[5] & 0x0F, policy->raw[5] >> 4,
+ policy->raw[6] & 0x0F, policy->raw[6] >> 4,
+ policy->raw[7] & 0x0F, policy->raw[7] >> 4,
+ policy->raw[8] & 0x0F, policy->raw[8] >> 4,
+ policy->raw[9] & 0x0F, policy->raw[9] >> 4,
+ policy->raw[10] & 0x0F, policy->raw[10] >> 4,
+ policy->raw[11] & 0x0F, policy->raw[11] >> 4,
+ policy->defined);
+}
+
+static void tx_policy_build(const struct cw1200_common *priv,
+ /* [out] */ struct tx_policy *policy,
+ struct ieee80211_tx_rate *rates, size_t count)
+{
+ int i, j;
+ unsigned limit = priv->short_frame_max_tx_count;
+ unsigned total = 0;
+ BUG_ON(rates[0].idx < 0);
+ memset(policy, 0, sizeof(*policy));
+
+ /* Sort rates in descending order. */
+ for (i = 1; i < count; ++i) {
+ if (rates[i].idx < 0) {
+ count = i;
+ break;
+ }
+ if (rates[i].idx > rates[i - 1].idx) {
+ struct ieee80211_tx_rate tmp = rates[i - 1];
+ rates[i - 1] = rates[i];
+ rates[i] = tmp;
+ }
+ }
+
+ /* Eliminate duplicates. */
+ total = rates[0].count;
+ for (i = 0, j = 1; j < count; ++j) {
+ if (rates[j].idx == rates[i].idx) {
+ rates[i].count += rates[j].count;
+ } else if (rates[j].idx > rates[i].idx) {
+ break;
+ } else {
+ ++i;
+ if (i != j)
+ rates[i] = rates[j];
+ }
+ total += rates[j].count;
+ }
+ count = i + 1;
+
+	/* Re-fill the policy, trying to keep every requested rate while
+	 * respecting the global max TX retransmission count.
+ */
+ if (limit < count)
+ limit = count;
+ if (total > limit) {
+ for (i = 0; i < count; ++i) {
+ int left = count - i - 1;
+ if (rates[i].count > limit - left)
+ rates[i].count = limit - left;
+ limit -= rates[i].count;
+ }
+ }
+
+	/* HACK!!! The device has problems (at least) switching from
+	 * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
+	 * of time (100-200 ms), leading to a noticeable throughput drop.
+	 * As a workaround, additional g-rates are injected into the
+	 * policy.
+ */
+ if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
+ rates[0].idx > 4 && rates[0].count > 2 &&
+ rates[1].idx < 2) {
+ int mid_rate = (rates[0].idx + 4) >> 1;
+
+ /* Decrease number of retries for the initial rate */
+ rates[0].count -= 2;
+
+ if (mid_rate != 4) {
+ /* Keep fallback rate at 1Mbps. */
+ rates[3] = rates[1];
+
+ /* Inject 1 transmission on lowest g-rate */
+ rates[2].idx = 4;
+ rates[2].count = 1;
+ rates[2].flags = rates[1].flags;
+
+ /* Inject 1 transmission on mid-rate */
+ rates[1].idx = mid_rate;
+ rates[1].count = 1;
+
+			/* Falling back to 1 Mbps is a really bad thing,
+			 * so try to further increase the probability of a
+			 * successful transmission on the lowest g-rate.
+			 */
+ if (rates[0].count >= 3) {
+ --rates[0].count;
+ ++rates[2].count;
+ }
+
+ /* Adjust amount of rates defined */
+ count += 2;
+ } else {
+ /* Keep fallback rate at 1Mbps. */
+ rates[2] = rates[1];
+
+ /* Inject 2 transmissions on lowest g-rate */
+ rates[1].idx = 4;
+ rates[1].count = 2;
+
+ /* Adjust amount of rates defined */
+ count += 1;
+ }
+ }
+
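+	/* Note: 'defined' records the highest WSM rate id in use plus one,
+	 * i.e. the number of 4-bit retry entries that are meaningful in
+	 * the table.
+	 */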
+ policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1;
+
+ for (i = 0; i < count; ++i) {
+ register unsigned rateid, off, shift, retries;
+
+ rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value;
+ off = rateid >> 3; /* eq. rateid / 8 */
+ shift = (rateid & 0x07) << 2; /* eq. (rateid % 8) * 4 */
+
+ retries = rates[i].count;
+ if (retries > 0x0F) {
+ rates[i].count = 0x0f;
+ retries = 0x0F;
+ }
+ policy->tbl[off] |= __cpu_to_le32(retries << shift);
+ policy->retry_count += retries;
+ }
+
+ pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d\n",
+ count,
+ rates[0].idx, rates[0].count,
+ rates[1].idx, rates[1].count,
+ rates[2].idx, rates[2].count,
+ rates[3].idx, rates[3].count);
+}
+
+static inline bool tx_policy_is_equal(const struct tx_policy *wanted,
+ const struct tx_policy *cached)
+{
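+	/* 'defined' counts 4-bit entries: compare defined/2 whole bytes,
+	 * then the low nibble of the next byte if 'defined' is odd.
+	 */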
+ size_t count = wanted->defined >> 1;
+ if (wanted->defined > cached->defined)
+ return false;
+ if (count) {
+ if (memcmp(wanted->raw, cached->raw, count))
+ return false;
+ }
+ if (wanted->defined & 1) {
+ if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F))
+ return false;
+ }
+ return true;
+}
+
+static int tx_policy_find(struct tx_policy_cache *cache,
+ const struct tx_policy *wanted)
+{
+	/* O(n) complexity. Not so good, but there are only 8 entries in
+	 * the cache.
+	 * Also, the LRU ordering helps to reduce the search time.
+	 */
+ struct tx_policy_cache_entry *it;
+ /* First search for policy in "used" list */
+ list_for_each_entry(it, &cache->used, link) {
+ if (tx_policy_is_equal(wanted, &it->policy))
+ return it - cache->cache;
+ }
+ /* Then - in "free list" */
+ list_for_each_entry(it, &cache->free, link) {
+ if (tx_policy_is_equal(wanted, &it->policy))
+ return it - cache->cache;
+ }
+ return -1;
+}
+
+static inline void tx_policy_use(struct tx_policy_cache *cache,
+ struct tx_policy_cache_entry *entry)
+{
+ ++entry->policy.usage_count;
+ list_move(&entry->link, &cache->used);
+}
+
+static inline int tx_policy_release(struct tx_policy_cache *cache,
+ struct tx_policy_cache_entry *entry)
+{
+ int ret = --entry->policy.usage_count;
+ if (!ret)
+ list_move(&entry->link, &cache->free);
+ return ret;
+}
+
+void tx_policy_clean(struct cw1200_common *priv)
+{
+ int idx, locked;
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+ struct tx_policy_cache_entry *entry;
+
+ cw1200_tx_queues_lock(priv);
+ spin_lock_bh(&cache->lock);
+ locked = list_empty(&cache->free);
+
+ for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) {
+ entry = &cache->cache[idx];
+		/* Policy usage count should be 0 at this time as all queues
+		 * should be empty.
+		 */
+ if (WARN_ON(entry->policy.usage_count)) {
+ entry->policy.usage_count = 0;
+ list_move(&entry->link, &cache->free);
+ }
+ memset(&entry->policy, 0, sizeof(entry->policy));
+ }
+ if (locked)
+ cw1200_tx_queues_unlock(priv);
+
+ cw1200_tx_queues_unlock(priv);
+ spin_unlock_bh(&cache->lock);
+}
+
+/* ******************************************************************** */
+/* External TX policy cache API */
+
+void tx_policy_init(struct cw1200_common *priv)
+{
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+ int i;
+
+ memset(cache, 0, sizeof(*cache));
+
+ spin_lock_init(&cache->lock);
+ INIT_LIST_HEAD(&cache->used);
+ INIT_LIST_HEAD(&cache->free);
+
+ for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i)
+ list_add(&cache->cache[i].link, &cache->free);
+}
+
+static int tx_policy_get(struct cw1200_common *priv,
+ struct ieee80211_tx_rate *rates,
+ size_t count, bool *renew)
+{
+ int idx;
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+ struct tx_policy wanted;
+
+ tx_policy_build(priv, &wanted, rates, count);
+
+ spin_lock_bh(&cache->lock);
+ if (WARN_ON_ONCE(list_empty(&cache->free))) {
+ spin_unlock_bh(&cache->lock);
+ return CW1200_INVALID_RATE_ID;
+ }
+ idx = tx_policy_find(cache, &wanted);
+ if (idx >= 0) {
+ pr_debug("[TX policy] Used TX policy: %d\n", idx);
+ *renew = false;
+ } else {
+ struct tx_policy_cache_entry *entry;
+ *renew = true;
+		/* If the policy is not found, create a new one
+		 * using the oldest entry in the "free" list.
+		 */
+ entry = list_entry(cache->free.prev,
+ struct tx_policy_cache_entry, link);
+ entry->policy = wanted;
+ idx = entry - cache->cache;
+ pr_debug("[TX policy] New TX policy: %d\n", idx);
+ tx_policy_dump(&entry->policy);
+ }
+ tx_policy_use(cache, &cache->cache[idx]);
+ if (list_empty(&cache->free)) {
+ /* Lock TX queues. */
+ cw1200_tx_queues_lock(priv);
+ }
+ spin_unlock_bh(&cache->lock);
+ return idx;
+}
+
+static void tx_policy_put(struct cw1200_common *priv, int idx)
+{
+ int usage, locked;
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+
+ spin_lock_bh(&cache->lock);
+ locked = list_empty(&cache->free);
+ usage = tx_policy_release(cache, &cache->cache[idx]);
+ if (locked && !usage) {
+ /* Unlock TX queues. */
+ cw1200_tx_queues_unlock(priv);
+ }
+ spin_unlock_bh(&cache->lock);
+}
+
+static int tx_policy_upload(struct cw1200_common *priv)
+{
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+ int i;
+ struct wsm_set_tx_rate_retry_policy arg = {
+ .num = 0,
+ };
+ spin_lock_bh(&cache->lock);
+
+ /* Upload only modified entries. */
+ for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) {
+ struct tx_policy *src = &cache->cache[i].policy;
+ if (src->retry_count && !src->uploaded) {
+ struct wsm_tx_rate_retry_policy *dst =
+ &arg.tbl[arg.num];
+ dst->index = i;
+ dst->short_retries = priv->short_frame_max_tx_count;
+ dst->long_retries = priv->long_frame_max_tx_count;
+
+ dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED |
+ WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT;
+ memcpy(dst->rate_count_indices, src->tbl,
+ sizeof(dst->rate_count_indices));
+ src->uploaded = 1;
+ ++arg.num;
+ }
+ }
+ spin_unlock_bh(&cache->lock);
+ cw1200_debug_tx_cache_miss(priv);
+ pr_debug("[TX policy] Upload %d policies\n", arg.num);
+ return wsm_set_tx_rate_retry_policy(priv, &arg);
+}
+
+void tx_policy_upload_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, tx_policy_upload_work);
+
+ pr_debug("[TX] TX policy upload.\n");
+ tx_policy_upload(priv);
+
+ wsm_unlock_tx(priv);
+ cw1200_tx_queues_unlock(priv);
+}
+
+/* ******************************************************************** */
+/* cw1200 TX implementation */
+
+struct cw1200_txinfo {
+ struct sk_buff *skb;
+ unsigned queue;
+ struct ieee80211_tx_info *tx_info;
+ const struct ieee80211_rate *rate;
+ struct ieee80211_hdr *hdr;
+ size_t hdrlen;
+ const u8 *da;
+ struct cw1200_sta_priv *sta_priv;
+ struct ieee80211_sta *sta;
+ struct cw1200_txpriv txpriv;
+};
+
+u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates)
+{
+ u32 ret = 0;
+ int i;
+ for (i = 0; i < 32; ++i) {
+ if (rates & BIT(i))
+ ret |= BIT(priv->rates[i].hw_value);
+ }
+ return ret;
+}
+
+static const struct ieee80211_rate *
+cw1200_get_tx_rate(const struct cw1200_common *priv,
+ const struct ieee80211_tx_rate *rate)
+{
+ if (rate->idx < 0)
+ return NULL;
+ if (rate->flags & IEEE80211_TX_RC_MCS)
+ return &priv->mcs_rates[rate->idx];
+ return &priv->hw->wiphy->bands[priv->channel->band]->
+ bitrates[rate->idx];
+}
+
+static int
+cw1200_tx_h_calc_link_ids(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ if (t->sta && t->sta_priv->link_id)
+ t->txpriv.raw_link_id =
+ t->txpriv.link_id =
+ t->sta_priv->link_id;
+ else if (priv->mode != NL80211_IFTYPE_AP)
+ t->txpriv.raw_link_id =
+ t->txpriv.link_id = 0;
+ else if (is_multicast_ether_addr(t->da)) {
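+		/* Multicast in AP mode: while beaconing, queue the frame
+		 * for delivery after the DTIM beacon.
+		 */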
+ if (priv->enable_beacon) {
+ t->txpriv.raw_link_id = 0;
+ t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM;
+ } else {
+ t->txpriv.raw_link_id = 0;
+ t->txpriv.link_id = 0;
+ }
+ } else {
+ t->txpriv.link_id = cw1200_find_link_id(priv, t->da);
+ if (!t->txpriv.link_id)
+ t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da);
+ if (!t->txpriv.link_id) {
+ wiphy_err(priv->hw->wiphy,
+ "No more link IDs available.\n");
+ return -ENOENT;
+ }
+ t->txpriv.raw_link_id = t->txpriv.link_id;
+ }
+ if (t->txpriv.raw_link_id)
+ priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp =
+ jiffies;
+ if (t->sta && (t->sta->uapsd_queues & BIT(t->queue)))
+ t->txpriv.link_id = CW1200_LINK_ID_UAPSD;
+ return 0;
+}
+
+static void
+cw1200_tx_h_pm(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ if (ieee80211_is_auth(t->hdr->frame_control)) {
+ u32 mask = ~BIT(t->txpriv.raw_link_id);
+ spin_lock_bh(&priv->ps_state_lock);
+ priv->sta_asleep_mask &= mask;
+ priv->pspoll_mask &= mask;
+ spin_unlock_bh(&priv->ps_state_lock);
+ }
+}
+
+static void
+cw1200_tx_h_calc_tid(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ if (ieee80211_is_data_qos(t->hdr->frame_control)) {
+ u8 *qos = ieee80211_get_qos_ctl(t->hdr);
+ t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
+ } else if (ieee80211_is_data(t->hdr->frame_control)) {
+ t->txpriv.tid = 0;
+ }
+}
+
+static int
+cw1200_tx_h_crypt(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ if (!t->tx_info->control.hw_key ||
+ !ieee80211_has_protected(t->hdr->frame_control))
+ return 0;
+
+ t->hdrlen += t->tx_info->control.hw_key->iv_len;
+ skb_put(t->skb, t->tx_info->control.hw_key->icv_len);
+
+ if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ skb_put(t->skb, 8); /* MIC space */
+
+ return 0;
+}
+
+static int
+cw1200_tx_h_align(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ u8 *flags)
+{
+ size_t offset = (size_t)t->skb->data & 3;
+
+ if (!offset)
+ return 0;
+
+ if (offset & 1) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: attempt to transmit a frame with wrong alignment: %zu\n",
+ offset);
+ return -EINVAL;
+ }
+
+ if (skb_headroom(t->skb) < offset) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: no space allocated for DMA alignment. headroom: %d\n",
+ skb_headroom(t->skb));
+ return -ENOMEM;
+ }
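+	/* Only an even (2-byte) misalignment can reach this point: push the
+	 * padding and let firmware know via WSM_TX_2BYTES_SHIFT.
+	 */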
+ skb_push(t->skb, offset);
+ t->hdrlen += offset;
+ t->txpriv.offset += offset;
+ *flags |= WSM_TX_2BYTES_SHIFT;
+ cw1200_debug_tx_align(priv);
+ return 0;
+}
+
+static int
+cw1200_tx_h_action(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ struct ieee80211_mgmt *mgmt =
+ (struct ieee80211_mgmt *)t->hdr;
+ if (ieee80211_is_action(t->hdr->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK)
+ return 1;
+ else
+ return 0;
+}
+
+/* Add WSM header */
+static struct wsm_tx *
+cw1200_tx_h_wsm(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ struct wsm_tx *wsm;
+
+ if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: no space allocated for WSM header. headroom: %d\n",
+ skb_headroom(t->skb));
+ return NULL;
+ }
+
+ wsm = (struct wsm_tx *)skb_push(t->skb, sizeof(struct wsm_tx));
+ t->txpriv.offset += sizeof(struct wsm_tx);
+ memset(wsm, 0, sizeof(*wsm));
+ wsm->hdr.len = __cpu_to_le16(t->skb->len);
+ wsm->hdr.id = __cpu_to_le16(0x0004);
+ wsm->queue_id = wsm_queue_id_to_wsm(t->queue);
+ return wsm;
+}
+
+/* BT Coex specific handling */
+static void
+cw1200_tx_h_bt(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ struct wsm_tx *wsm)
+{
+ u8 priority = 0;
+
+ if (!priv->bt_present)
+ return;
+
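+	/* Choose a PTA/EPTA priority hint for this frame based on its type. */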
+ if (ieee80211_is_nullfunc(t->hdr->frame_control)) {
+ priority = WSM_EPTA_PRIORITY_MGT;
+ } else if (ieee80211_is_data(t->hdr->frame_control)) {
+ /* Skip LLC SNAP header (+6) */
+ u8 *payload = &t->skb->data[t->hdrlen];
+ __be16 *ethertype = (__be16 *)&payload[6];
+ if (be16_to_cpu(*ethertype) == ETH_P_PAE)
+ priority = WSM_EPTA_PRIORITY_EAPOL;
+ } else if (ieee80211_is_assoc_req(t->hdr->frame_control) ||
+ ieee80211_is_reassoc_req(t->hdr->frame_control)) {
+ struct ieee80211_mgmt *mgt_frame =
+ (struct ieee80211_mgmt *)t->hdr;
+
+ if (le16_to_cpu(mgt_frame->u.assoc_req.listen_interval) <
+ priv->listen_interval) {
+ pr_debug("Modified Listen Interval to %d from %d\n",
+ priv->listen_interval,
+ mgt_frame->u.assoc_req.listen_interval);
+			/* Replace the listen interval derived by mac80211
+			 * with the one read from the SDD file.
+			 */
+ mgt_frame->u.assoc_req.listen_interval = cpu_to_le16(priv->listen_interval);
+ }
+ }
+
+ if (!priority) {
+ if (ieee80211_is_action(t->hdr->frame_control))
+ priority = WSM_EPTA_PRIORITY_ACTION;
+ else if (ieee80211_is_mgmt(t->hdr->frame_control))
+ priority = WSM_EPTA_PRIORITY_MGT;
+ else if ((wsm->queue_id == WSM_QUEUE_VOICE))
+ priority = WSM_EPTA_PRIORITY_VOICE;
+ else if ((wsm->queue_id == WSM_QUEUE_VIDEO))
+ priority = WSM_EPTA_PRIORITY_VIDEO;
+ else
+ priority = WSM_EPTA_PRIORITY_DATA;
+ }
+
+ pr_debug("[TX] EPTA priority %d.\n", priority);
+
+ wsm->flags |= priority << 1;
+}
+
+static int
+cw1200_tx_h_rate_policy(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ struct wsm_tx *wsm)
+{
+ bool tx_policy_renew = false;
+
+ t->txpriv.rate_id = tx_policy_get(priv,
+ t->tx_info->control.rates, IEEE80211_TX_MAX_RATES,
+ &tx_policy_renew);
+ if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID)
+ return -EFAULT;
+
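+	/* The chosen rate-policy index is carried in the upper nibble
+	 * of the WSM TX flags.
+	 */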
+ wsm->flags |= t->txpriv.rate_id << 4;
+
+	t->rate = cw1200_get_tx_rate(priv,
+				     &t->tx_info->control.rates[0]);
+	wsm->max_tx_rate = t->rate->hw_value;
+ if (t->rate->flags & IEEE80211_TX_RC_MCS) {
+ if (cw1200_ht_greenfield(&priv->ht_info))
+ wsm->ht_tx_parameters |=
+ __cpu_to_le32(WSM_HT_TX_GREENFIELD);
+ else
+ wsm->ht_tx_parameters |=
+ __cpu_to_le32(WSM_HT_TX_MIXED);
+ }
+
+ if (tx_policy_renew) {
+ pr_debug("[TX] TX policy renew.\n");
+		/* It is not optimal to stop the TX queues every now and then.
+		 * Better to reimplement the scheduling with a counter. TODO.
+		 */
+ wsm_lock_tx_async(priv);
+ cw1200_tx_queues_lock(priv);
+ if (queue_work(priv->workqueue,
+ &priv->tx_policy_upload_work) <= 0) {
+ cw1200_tx_queues_unlock(priv);
+ wsm_unlock_tx(priv);
+ }
+ }
+ return 0;
+}
+
+static bool
+cw1200_tx_h_pm_state(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ int was_buffered = 1;
+
+ if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM &&
+ !priv->buffered_multicasts) {
+ priv->buffered_multicasts = true;
+ if (priv->sta_asleep_mask)
+ queue_work(priv->workqueue,
+ &priv->multicast_start_work);
+ }
+
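+	/* Bump the per-link, per-TID buffered counter; return true only for
+	 * the first frame buffered on this TID so the caller can notify
+	 * mac80211.
+	 */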
+ if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID)
+ was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++;
+
+ return !was_buffered;
+}
+
+/* ******************************************************************** */
+
+void cw1200_tx(struct ieee80211_hw *dev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct cw1200_common *priv = dev->priv;
+ struct cw1200_txinfo t = {
+ .skb = skb,
+ .queue = skb_get_queue_mapping(skb),
+ .tx_info = IEEE80211_SKB_CB(skb),
+ .hdr = (struct ieee80211_hdr *)skb->data,
+ .txpriv.tid = CW1200_MAX_TID,
+ .txpriv.rate_id = CW1200_INVALID_RATE_ID,
+ };
+ struct ieee80211_sta *sta;
+ struct wsm_tx *wsm;
+ bool tid_update = false;
+ u8 flags = 0;
+ int ret;
+
+ if (priv->bh_error)
+ goto drop;
+
+ t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control);
+ t.da = ieee80211_get_DA(t.hdr);
+ if (control) {
+ t.sta = control->sta;
+ t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv;
+ }
+
+ if (WARN_ON(t.queue >= 4))
+ goto drop;
+
+ ret = cw1200_tx_h_calc_link_ids(priv, &t);
+ if (ret)
+ goto drop;
+
+ pr_debug("[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n",
+ skb->len, t.queue, t.txpriv.link_id,
+ t.txpriv.raw_link_id);
+
+ cw1200_tx_h_pm(priv, &t);
+ cw1200_tx_h_calc_tid(priv, &t);
+ ret = cw1200_tx_h_crypt(priv, &t);
+ if (ret)
+ goto drop;
+ ret = cw1200_tx_h_align(priv, &t, &flags);
+ if (ret)
+ goto drop;
+ ret = cw1200_tx_h_action(priv, &t);
+ if (ret)
+ goto drop;
+ wsm = cw1200_tx_h_wsm(priv, &t);
+ if (!wsm) {
+ ret = -ENOMEM;
+ goto drop;
+ }
+ wsm->flags |= flags;
+ cw1200_tx_h_bt(priv, &t, wsm);
+ ret = cw1200_tx_h_rate_policy(priv, &t, wsm);
+ if (ret)
+ goto drop;
+
+ rcu_read_lock();
+ sta = rcu_dereference(t.sta);
+
+ spin_lock_bh(&priv->ps_state_lock);
+ {
+ tid_update = cw1200_tx_h_pm_state(priv, &t);
+ BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
+ t.skb, &t.txpriv));
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+
+ if (tid_update && sta)
+ ieee80211_sta_set_buffered(sta, t.txpriv.tid, true);
+
+ rcu_read_unlock();
+
+ cw1200_bh_wakeup(priv);
+
+ return;
+
+drop:
+ cw1200_skb_dtor(priv, skb, &t.txpriv);
+ return;
+}
+
+/* ******************************************************************** */
+
+static int cw1200_handle_action_rx(struct cw1200_common *priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ /* Filter block ACK negotiation: fully controlled by firmware */
+ if (mgmt->u.action.category == WLAN_CATEGORY_BACK)
+ return 1;
+
+ return 0;
+}
+
+static int cw1200_handle_pspoll(struct cw1200_common *priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
+ int link_id = 0;
+ u32 pspoll_mask = 0;
+ int drop = 1;
+ int i;
+
+ if (priv->join_status != CW1200_JOIN_STATUS_AP)
+ goto done;
+ if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN))
+ goto done;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, pspoll->ta);
+ if (sta) {
+ struct cw1200_sta_priv *sta_priv;
+ sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv;
+ link_id = sta_priv->link_id;
+ pspoll_mask = BIT(sta_priv->link_id);
+ }
+ rcu_read_unlock();
+ if (!link_id)
+ goto done;
+
+ priv->pspoll_mask |= pspoll_mask;
+ drop = 0;
+
+ /* Do not report PS-polls if data for the given link id is already queued. */
+ for (i = 0; i < 4; ++i) {
+ if (cw1200_queue_get_num_queued(&priv->tx_queue[i],
+ pspoll_mask)) {
+ cw1200_bh_wakeup(priv);
+ drop = 1;
+ break;
+ }
+ }
+ pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd");
+done:
+ return drop;
+}
+
+/* ******************************************************************** */
+
+void cw1200_tx_confirm_cb(struct cw1200_common *priv,
+ int link_id,
+ struct wsm_tx_confirm *arg)
+{
+ u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id);
+ struct cw1200_queue *queue = &priv->tx_queue[queue_id];
+ struct sk_buff *skb;
+ const struct cw1200_txpriv *txpriv;
+
+ pr_debug("[TX] TX confirm: %d, %d.\n",
+ arg->status, arg->ack_failures);
+
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
+ /* STA is stopped. */
+ return;
+ }
+
+ if (WARN_ON(queue_id >= 4))
+ return;
+
+ if (arg->status)
+ pr_debug("TX failed: %d.\n", arg->status);
+
+ if ((arg->status == WSM_REQUEUE) &&
+ (arg->flags & WSM_TX_STATUS_REQUEUE)) {
+ /* "Requeue" means "implicit suspend" */
+ struct wsm_suspend_resume suspend = {
+ .link_id = link_id,
+ .stop = 1,
+ .multicast = !link_id,
+ };
+ cw1200_suspend_resume(priv, &suspend);
+ wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n",
+ link_id,
+ cw1200_queue_get_generation(arg->packet_id) + 1,
+ priv->sta_asleep_mask);
+ cw1200_queue_requeue(queue, arg->packet_id);
+ spin_lock_bh(&priv->ps_state_lock);
+ if (!link_id) {
+ priv->buffered_multicasts = true;
+ if (priv->sta_asleep_mask) {
+ queue_work(priv->workqueue,
+ &priv->multicast_start_work);
+ }
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+ } else if (!cw1200_queue_get_skb(queue, arg->packet_id,
+ &skb, &txpriv)) {
+ struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb);
+ int tx_count = arg->ack_failures;
+ u8 ht_flags = 0;
+ int i;
+
+ if (cw1200_ht_greenfield(&priv->ht_info))
+ ht_flags |= IEEE80211_TX_RC_GREEN_FIELD;
+
+ spin_lock(&priv->bss_loss_lock);
+ if (priv->bss_loss_state &&
+ arg->packet_id == priv->bss_loss_confirm_id) {
+ if (arg->status) {
+ /* Recovery failed */
+ __cw1200_cqm_bssloss_sm(priv, 0, 0, 1);
+ } else {
+ /* Recovery succeeded */
+ __cw1200_cqm_bssloss_sm(priv, 0, 1, 0);
+ }
+ }
+ spin_unlock(&priv->bss_loss_lock);
+
+ if (!arg->status) {
+ tx->flags |= IEEE80211_TX_STAT_ACK;
+ ++tx_count;
+ cw1200_debug_txed(priv);
+ if (arg->flags & WSM_TX_STATUS_AGGREGATION) {
+ /* Do not report aggregation to mac80211:
+ * it confuses minstrel a lot.
+ */
+ /* tx->flags |= IEEE80211_TX_STAT_AMPDU; */
+ cw1200_debug_txed_agg(priv);
+ }
+ } else {
+ if (tx_count)
+ ++tx_count;
+ }
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
+ if (tx->status.rates[i].count >= tx_count) {
+ tx->status.rates[i].count = tx_count;
+ break;
+ }
+ tx_count -= tx->status.rates[i].count;
+ if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS)
+ tx->status.rates[i].flags |= ht_flags;
+ }
+
+ for (++i; i < IEEE80211_TX_MAX_RATES; ++i) {
+ tx->status.rates[i].count = 0;
+ tx->status.rates[i].idx = -1;
+ }
+
+ /* Pull off any crypto trailers that we added on */
+ if (tx->control.hw_key) {
+ skb_trim(skb, skb->len - tx->control.hw_key->icv_len);
+ if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ skb_trim(skb, skb->len - 8); /* MIC space */
+ }
+ cw1200_queue_remove(queue, arg->packet_id);
+ }
+ /* XXX TODO: Only wake if there are pending transmits.. */
+ cw1200_bh_wakeup(priv);
+}
+
+static void cw1200_notify_buffered_tx(struct cw1200_common *priv,
+ struct sk_buff *skb, int link_id, int tid)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ u8 *buffered;
+ u8 still_buffered = 0;
+
+ if (link_id && tid < CW1200_MAX_TID) {
+ buffered = priv->link_id_db[link_id - 1].buffered;
+
+ spin_lock_bh(&priv->ps_state_lock);
+ if (!WARN_ON(!buffered[tid]))
+ still_buffered = --buffered[tid];
+ spin_unlock_bh(&priv->ps_state_lock);
+
+ if (!still_buffered && tid < CW1200_MAX_TID) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tid, false);
+ rcu_read_unlock();
+ }
+ }
+}
+
+void cw1200_skb_dtor(struct cw1200_common *priv,
+ struct sk_buff *skb,
+ const struct cw1200_txpriv *txpriv)
+{
+ skb_pull(skb, txpriv->offset);
+ if (txpriv->rate_id != CW1200_INVALID_RATE_ID) {
+ cw1200_notify_buffered_tx(priv, skb,
+ txpriv->raw_link_id, txpriv->tid);
+ tx_policy_put(priv, txpriv->rate_id);
+ }
+ ieee80211_tx_status(priv->hw, skb);
+}
+
+void cw1200_rx_cb(struct cw1200_common *priv,
+ struct wsm_rx *arg,
+ int link_id,
+ struct sk_buff **skb_p)
+{
+ struct sk_buff *skb = *skb_p;
+ struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct cw1200_link_entry *entry = NULL;
+ unsigned long grace_period;
+
+ bool early_data = false;
+ bool p2p = priv->vif && priv->vif->p2p;
+ size_t hdrlen;
+ hdr->flag = 0;
+
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
+ /* STA is stopped. */
+ goto drop;
+ }
+
+ if (link_id && link_id <= CW1200_MAX_STA_IN_AP_MODE) {
+ entry = &priv->link_id_db[link_id - 1];
+ if (entry->status == CW1200_LINK_SOFT &&
+ ieee80211_is_data(frame->frame_control))
+ early_data = true;
+ entry->timestamp = jiffies;
+ } else if (p2p &&
+ ieee80211_is_action(frame->frame_control) &&
+ (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
+ pr_debug("[RX] Going to MAP&RESET link ID\n");
+ WARN_ON(work_pending(&priv->linkid_reset_work));
+ memcpy(&priv->action_frame_sa[0],
+ ieee80211_get_SA(frame), ETH_ALEN);
+ priv->action_linkid = 0;
+ schedule_work(&priv->linkid_reset_work);
+ }
+
+ if (link_id && p2p &&
+ ieee80211_is_action(frame->frame_control) &&
+ (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
+ /* Link ID already exists for the ACTION frame.
+ * Reset and Remap
+ */
+ WARN_ON(work_pending(&priv->linkid_reset_work));
+ memcpy(&priv->action_frame_sa[0],
+ ieee80211_get_SA(frame), ETH_ALEN);
+ priv->action_linkid = link_id;
+ schedule_work(&priv->linkid_reset_work);
+ }
+ if (arg->status) {
+ if (arg->status == WSM_STATUS_MICFAILURE) {
+ pr_debug("[RX] MIC failure.\n");
+ hdr->flag |= RX_FLAG_MMIC_ERROR;
+ } else if (arg->status == WSM_STATUS_NO_KEY_FOUND) {
+ pr_debug("[RX] No key found.\n");
+ goto drop;
+ } else {
+ pr_debug("[RX] Receive failure: %d.\n",
+ arg->status);
+ goto drop;
+ }
+ }
+
+ if (skb->len < sizeof(struct ieee80211_pspoll)) {
+ wiphy_warn(priv->hw->wiphy, "Mailformed SDU rx'ed. Size is lesser than IEEE header.\n");
+ goto drop;
+ }
+
+ if (ieee80211_is_pspoll(frame->frame_control))
+ if (cw1200_handle_pspoll(priv, skb))
+ goto drop;
+
+ hdr->band = ((arg->channel_number & 0xff00) ||
+ (arg->channel_number > 14)) ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ hdr->freq = ieee80211_channel_to_frequency(
+ arg->channel_number,
+ hdr->band);
+
+ if (arg->rx_rate >= 14) {
+ hdr->flag |= RX_FLAG_HT;
+ hdr->rate_idx = arg->rx_rate - 14;
+ } else if (arg->rx_rate >= 4) {
+ hdr->rate_idx = arg->rx_rate - 2;
+ } else {
+ hdr->rate_idx = arg->rx_rate;
+ }
+
+ hdr->signal = (s8)arg->rcpi_rssi;
+ hdr->antenna = 0;
+
+ hdrlen = ieee80211_hdrlen(frame->frame_control);
+
+ if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
+ size_t iv_len = 0, icv_len = 0;
+
+ hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;
+
+ /* Oops... There is no fast way to ask mac80211 about
+ * IV/ICV lengths. Even the defines are not exposed.
+ */
+ switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
+ case WSM_RX_STATUS_WEP:
+ iv_len = 4 /* WEP_IV_LEN */;
+ icv_len = 4 /* WEP_ICV_LEN */;
+ break;
+ case WSM_RX_STATUS_TKIP:
+ iv_len = 8 /* TKIP_IV_LEN */;
+ icv_len = 4 /* TKIP_ICV_LEN */
+ + 8 /*MICHAEL_MIC_LEN*/;
+ hdr->flag |= RX_FLAG_MMIC_STRIPPED;
+ break;
+ case WSM_RX_STATUS_AES:
+ iv_len = 8 /* CCMP_HDR_LEN */;
+ icv_len = 8 /* CCMP_MIC_LEN */;
+ break;
+ case WSM_RX_STATUS_WAPI:
+ iv_len = 18 /* WAPI_HDR_LEN */;
+ icv_len = 16 /* WAPI_MIC_LEN */;
+ break;
+ default:
+ pr_warn("Unknown encryption type %d\n",
+ WSM_RX_STATUS_ENCRYPTION(arg->flags));
+ goto drop;
+ }
+
+ /* Firmware strips ICV in case of MIC failure. */
+ if (arg->status == WSM_STATUS_MICFAILURE)
+ icv_len = 0;
+
+ if (skb->len < hdrlen + iv_len + icv_len) {
+ wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is lesser than crypto headers.\n");
+ goto drop;
+ }
+
+ /* Remove IV, ICV and MIC */
+ skb_trim(skb, skb->len - icv_len);
+ memmove(skb->data + iv_len, skb->data, hdrlen);
+ skb_pull(skb, iv_len);
+ }
+
+ /* Remove the TSF from the end of the frame */
+ if ((arg->flags & WSM_RX_STATUS_TSF_INCLUDED) && skb->len >= 8) {
+ memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
+ hdr->mactime = le64_to_cpu(hdr->mactime);
+ skb_trim(skb, skb->len - 8);
+ } else {
+ hdr->mactime = 0;
+ }
+
+ cw1200_debug_rxed(priv);
+ if (arg->flags & WSM_RX_STATUS_AGGREGATE)
+ cw1200_debug_rxed_agg(priv);
+
+ if (ieee80211_is_action(frame->frame_control) &&
+ (arg->flags & WSM_RX_STATUS_ADDRESS1)) {
+ if (cw1200_handle_action_rx(priv, skb))
+ return;
+ } else if (ieee80211_is_beacon(frame->frame_control) &&
+ !arg->status && priv->vif &&
+ !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
+ ETH_ALEN)) {
+ const u8 *tim_ie;
+ u8 *ies = ((struct ieee80211_mgmt *)
+ (skb->data))->u.beacon.variable;
+ size_t ies_len = skb->len - (ies - (u8 *)(skb->data));
+
+ tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
+ if (tim_ie) {
+ struct ieee80211_tim_ie *tim =
+ (struct ieee80211_tim_ie *)&tim_ie[2];
+
+ if (priv->join_dtim_period != tim->dtim_period) {
+ priv->join_dtim_period = tim->dtim_period;
+ queue_work(priv->workqueue,
+ &priv->set_beacon_wakeup_period_work);
+ }
+ }
+
+ /* Disable beacon filter once we're associated... */
+ if (priv->disable_beacon_filter &&
+ (priv->vif->bss_conf.assoc ||
+ priv->vif->bss_conf.ibss_joined)) {
+ priv->disable_beacon_filter = false;
+ queue_work(priv->workqueue,
+ &priv->update_filtering_work);
+ }
+ }
+
+ /* Stay awake after a frame is received to give
+ * userspace a chance to react and acquire an appropriate
+ * wakelock.
+ */
+ if (ieee80211_is_auth(frame->frame_control))
+ grace_period = 5 * HZ;
+ else if (ieee80211_is_deauth(frame->frame_control))
+ grace_period = 5 * HZ;
+ else
+ grace_period = 1 * HZ;
+ cw1200_pm_stay_awake(&priv->pm_state, grace_period);
+
+ if (early_data) {
+ spin_lock_bh(&priv->ps_state_lock);
+ /* Double-check status with lock held */
+ if (entry->status == CW1200_LINK_SOFT)
+ skb_queue_tail(&entry->rx_queue, skb);
+ else
+ ieee80211_rx_irqsafe(priv->hw, skb);
+ spin_unlock_bh(&priv->ps_state_lock);
+ } else {
+ ieee80211_rx_irqsafe(priv->hw, skb);
+ }
+ *skb_p = NULL;
+
+ return;
+
+drop:
+ /* TODO: update failure counters */
+ return;
+}
+
+/* ******************************************************************** */
+/* Security */
+
+int cw1200_alloc_key(struct cw1200_common *priv)
+{
+ int idx;
+
+ idx = ffs(~priv->key_map) - 1;
+ if (idx < 0 || idx > WSM_KEY_MAX_INDEX)
+ return -1;
+
+ priv->key_map |= BIT(idx);
+ priv->keys[idx].index = idx;
+ return idx;
+}
+
+void cw1200_free_key(struct cw1200_common *priv, int idx)
+{
+ BUG_ON(!(priv->key_map & BIT(idx)));
+ memset(&priv->keys[idx], 0, sizeof(priv->keys[idx]));
+ priv->key_map &= ~BIT(idx);
+}
+
+void cw1200_free_keys(struct cw1200_common *priv)
+{
+ memset(&priv->keys, 0, sizeof(priv->keys));
+ priv->key_map = 0;
+}
+
+int cw1200_upload_keys(struct cw1200_common *priv)
+{
+ int idx, ret = 0;
+ for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx)
+ if (priv->key_map & BIT(idx)) {
+ ret = wsm_add_key(priv, &priv->keys[idx]);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+/* Workaround for WFD test case 6.1.10 */
+void cw1200_link_id_reset(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, linkid_reset_work);
+ int temp_linkid;
+
+ if (!priv->action_linkid) {
+ /* In GO mode we can receive ACTION frames without a linkID */
+ temp_linkid = cw1200_alloc_link_id(priv,
+ &priv->action_frame_sa[0]);
+ WARN_ON(!temp_linkid);
+ if (temp_linkid) {
+ /* Make sure we execute the WQ */
+ flush_workqueue(priv->workqueue);
+ /* Release the link ID */
+ spin_lock_bh(&priv->ps_state_lock);
+ priv->link_id_db[temp_linkid - 1].prev_status =
+ priv->link_id_db[temp_linkid - 1].status;
+ priv->link_id_db[temp_linkid - 1].status =
+ CW1200_LINK_RESET;
+ spin_unlock_bh(&priv->ps_state_lock);
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue,
+ &priv->link_id_work) <= 0)
+ wsm_unlock_tx(priv);
+ }
+ } else {
+ spin_lock_bh(&priv->ps_state_lock);
+ priv->link_id_db[priv->action_linkid - 1].prev_status =
+ priv->link_id_db[priv->action_linkid - 1].status;
+ priv->link_id_db[priv->action_linkid - 1].status =
+ CW1200_LINK_RESET_REMAP;
+ spin_unlock_bh(&priv->ps_state_lock);
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
+ wsm_unlock_tx(priv);
+ flush_workqueue(priv->workqueue);
+ }
+}
+
+int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac)
+{
+ int i, ret = 0;
+ spin_lock_bh(&priv->ps_state_lock);
+ for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+ if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) &&
+ priv->link_id_db[i].status) {
+ priv->link_id_db[i].timestamp = jiffies;
+ ret = i + 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+ return ret;
+}
+
+int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac)
+{
+ int i, ret = 0;
+ unsigned long max_inactivity = 0;
+ unsigned long now = jiffies;
+
+ spin_lock_bh(&priv->ps_state_lock);
+ for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+ if (!priv->link_id_db[i].status) {
+ ret = i + 1;
+ break;
+ } else if (priv->link_id_db[i].status != CW1200_LINK_HARD &&
+ !priv->tx_queue_stats.link_map_cache[i + 1]) {
+ unsigned long inactivity =
+ now - priv->link_id_db[i].timestamp;
+ if (inactivity < max_inactivity)
+ continue;
+ max_inactivity = inactivity;
+ ret = i + 1;
+ }
+ }
+ if (ret) {
+ struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1];
+ pr_debug("[AP] STA added, link_id: %d\n", ret);
+ entry->status = CW1200_LINK_RESERVE;
+ memcpy(&entry->mac, mac, ETH_ALEN);
+ memset(&entry->buffered, 0, CW1200_MAX_TID);
+ skb_queue_head_init(&entry->rx_queue);
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
+ wsm_unlock_tx(priv);
+ } else {
+ wiphy_info(priv->hw->wiphy,
+ "[AP] Early: no more link IDs available.\n");
+ }
+
+ spin_unlock_bh(&priv->ps_state_lock);
+ return ret;
+}
+
+void cw1200_link_id_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, link_id_work);
+ wsm_flush_tx(priv);
+ cw1200_link_id_gc_work(&priv->link_id_gc_work.work);
+ wsm_unlock_tx(priv);
+}
+
+void cw1200_link_id_gc_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, link_id_gc_work.work);
+ struct wsm_reset reset = {
+ .reset_statistics = false,
+ };
+ struct wsm_map_link map_link = {
+ .link_id = 0,
+ };
+ unsigned long now = jiffies;
+ unsigned long next_gc = -1;
+ long ttl;
+ bool need_reset;
+ u32 mask;
+ int i;
+
+ if (priv->join_status != CW1200_JOIN_STATUS_AP)
+ return;
+
+ wsm_lock_tx(priv);
+ spin_lock_bh(&priv->ps_state_lock);
+ for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+ need_reset = false;
+ mask = BIT(i + 1);
+ if (priv->link_id_db[i].status == CW1200_LINK_RESERVE ||
+ (priv->link_id_db[i].status == CW1200_LINK_HARD &&
+ !(priv->link_id_map & mask))) {
+ if (priv->link_id_map & mask) {
+ priv->sta_asleep_mask &= ~mask;
+ priv->pspoll_mask &= ~mask;
+ need_reset = true;
+ }
+ priv->link_id_map |= mask;
+ if (priv->link_id_db[i].status != CW1200_LINK_HARD)
+ priv->link_id_db[i].status = CW1200_LINK_SOFT;
+ memcpy(map_link.mac_addr, priv->link_id_db[i].mac,
+ ETH_ALEN);
+ spin_unlock_bh(&priv->ps_state_lock);
+ if (need_reset) {
+ reset.link_id = i + 1;
+ wsm_reset(priv, &reset);
+ }
+ map_link.link_id = i + 1;
+ wsm_map_link(priv, &map_link);
+ next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT);
+ spin_lock_bh(&priv->ps_state_lock);
+ } else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) {
+ ttl = priv->link_id_db[i].timestamp - now +
+ CW1200_LINK_ID_GC_TIMEOUT;
+ if (ttl <= 0) {
+ need_reset = true;
+ priv->link_id_db[i].status = CW1200_LINK_OFF;
+ priv->link_id_map &= ~mask;
+ priv->sta_asleep_mask &= ~mask;
+ priv->pspoll_mask &= ~mask;
+ memset(map_link.mac_addr, 0, ETH_ALEN);
+ spin_unlock_bh(&priv->ps_state_lock);
+ reset.link_id = i + 1;
+ wsm_reset(priv, &reset);
+ spin_lock_bh(&priv->ps_state_lock);
+ } else {
+ next_gc = min_t(unsigned long, next_gc, ttl);
+ }
+ } else if (priv->link_id_db[i].status == CW1200_LINK_RESET ||
+ priv->link_id_db[i].status ==
+ CW1200_LINK_RESET_REMAP) {
+ int status = priv->link_id_db[i].status;
+ priv->link_id_db[i].status =
+ priv->link_id_db[i].prev_status;
+ priv->link_id_db[i].timestamp = now;
+ reset.link_id = i + 1;
+ spin_unlock_bh(&priv->ps_state_lock);
+ wsm_reset(priv, &reset);
+ if (status == CW1200_LINK_RESET_REMAP) {
+ memcpy(map_link.mac_addr,
+ priv->link_id_db[i].mac,
+ ETH_ALEN);
+ map_link.link_id = i + 1;
+ wsm_map_link(priv, &map_link);
+ next_gc = min(next_gc,
+ CW1200_LINK_ID_GC_TIMEOUT);
+ }
+ spin_lock_bh(&priv->ps_state_lock);
+ }
+ if (need_reset) {
+ skb_queue_purge(&priv->link_id_db[i].rx_queue);
+ pr_debug("[AP] STA removed, link_id: %d\n",
+ reset.link_id);
+ }
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+ if (next_gc != -1)
+ queue_delayed_work(priv->workqueue,
+ &priv->link_id_gc_work, next_gc);
+ wsm_unlock_tx(priv);
+}
diff --git a/drivers/net/wireless/cw1200/txrx.h b/drivers/net/wireless/cw1200/txrx.h
new file mode 100644
index 000000000000..492a4e14213b
--- /dev/null
+++ b/drivers/net/wireless/cw1200/txrx.h
@@ -0,0 +1,106 @@
+/*
+ * Datapath interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_TXRX_H
+#define CW1200_TXRX_H
+
+#include <linux/list.h>
+
+/* extern */ struct ieee80211_hw;
+/* extern */ struct sk_buff;
+/* extern */ struct wsm_tx;
+/* extern */ struct wsm_rx;
+/* extern */ struct wsm_tx_confirm;
+/* extern */ struct cw1200_txpriv;
+
+struct tx_policy {
+ union {
+ __le32 tbl[3];
+ u8 raw[12];
+ };
+ u8 defined;
+ u8 usage_count;
+ u8 retry_count;
+ u8 uploaded;
+};
+
+struct tx_policy_cache_entry {
+ struct tx_policy policy;
+ struct list_head link;
+};
+
+#define TX_POLICY_CACHE_SIZE (8)
+struct tx_policy_cache {
+ struct tx_policy_cache_entry cache[TX_POLICY_CACHE_SIZE];
+ struct list_head used;
+ struct list_head free;
+ spinlock_t lock; /* Protect policy cache */
+};
+
+/* ******************************************************************** */
+/* TX policy cache */
+/* The TX policy cache works around an overcomplicated WSM API:
+ * the device does not accept a per-PDU tx retry sequence.
+ * It uses a "tx retry policy id" instead, so the driver has to keep
+ * the Linux tx retry sequences in sync with the retry policy table
+ * in the device.
+ */
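+/* Typical flow (sketch; see txrx.c): cw1200_tx_h_rate_policy() maps
+ * the mac80211 rate set to a policy index with tx_policy_get(), and
+ * cw1200_skb_dtor() releases that index again with tx_policy_put().
+ */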
+void tx_policy_init(struct cw1200_common *priv);
+void tx_policy_upload_work(struct work_struct *work);
+void tx_policy_clean(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* TX implementation */
+
+u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv,
+ u32 rates);
+void cw1200_tx(struct ieee80211_hw *dev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void cw1200_skb_dtor(struct cw1200_common *priv,
+ struct sk_buff *skb,
+ const struct cw1200_txpriv *txpriv);
+
+/* ******************************************************************** */
+/* WSM callbacks */
+
+void cw1200_tx_confirm_cb(struct cw1200_common *priv,
+ int link_id,
+ struct wsm_tx_confirm *arg);
+void cw1200_rx_cb(struct cw1200_common *priv,
+ struct wsm_rx *arg,
+ int link_id,
+ struct sk_buff **skb_p);
+
+/* ******************************************************************** */
+/* Timeout */
+
+void cw1200_tx_timeout(struct work_struct *work);
+
+/* ******************************************************************** */
+/* Security */
+int cw1200_alloc_key(struct cw1200_common *priv);
+void cw1200_free_key(struct cw1200_common *priv, int idx);
+void cw1200_free_keys(struct cw1200_common *priv);
+int cw1200_upload_keys(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* Workaround for WFD test case 6.1.10 */
+void cw1200_link_id_reset(struct work_struct *work);
+
+#define CW1200_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
+
+int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac);
+int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac);
+void cw1200_link_id_work(struct work_struct *work);
+void cw1200_link_id_gc_work(struct work_struct *work);
+
+
+#endif /* CW1200_TXRX_H */
diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/cw1200/wsm.c
new file mode 100644
index 000000000000..cbb74d7a9be5
--- /dev/null
+++ b/drivers/net/wireless/cw1200/wsm.c
@@ -0,0 +1,1822 @@
+/*
+ * WSM host interface (HI) implementation for
+ * ST-Ericsson CW1200 mac80211 drivers.
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "cw1200.h"
+#include "wsm.h"
+#include "bh.h"
+#include "sta.h"
+#include "debug.h"
+
+#define WSM_CMD_TIMEOUT (2 * HZ) /* With respect to interrupt loss */
+#define WSM_CMD_START_TIMEOUT (7 * HZ)
+#define WSM_CMD_RESET_TIMEOUT (3 * HZ) /* 2 sec. timeout was observed. */
+#define WSM_CMD_MAX_TIMEOUT (3 * HZ)
+
+#define WSM_SKIP(buf, size) \
+ do { \
+ if ((buf)->data + size > (buf)->end) \
+ goto underflow; \
+ (buf)->data += size; \
+ } while (0)
+
+#define WSM_GET(buf, ptr, size) \
+ do { \
+ if ((buf)->data + size > (buf)->end) \
+ goto underflow; \
+ memcpy(ptr, (buf)->data, size); \
+ (buf)->data += size; \
+ } while (0)
+
+#define __WSM_GET(buf, type, type2, cvt) \
+ ({ \
+ type val; \
+ if ((buf)->data + sizeof(type) > (buf)->end) \
+ goto underflow; \
+ val = cvt(*(type2 *)(buf)->data); \
+ (buf)->data += sizeof(type); \
+ val; \
+ })
+
+#define WSM_GET8(buf) __WSM_GET(buf, u8, u8, (u8))
+#define WSM_GET16(buf) __WSM_GET(buf, u16, __le16, __le16_to_cpu)
+#define WSM_GET32(buf) __WSM_GET(buf, u32, __le32, __le32_to_cpu)
+
+#define WSM_PUT(buf, ptr, size) \
+ do { \
+ if ((buf)->data + size > (buf)->end) \
+ if (wsm_buf_reserve((buf), size)) \
+ goto nomem; \
+ memcpy((buf)->data, ptr, size); \
+ (buf)->data += size; \
+ } while (0)
+
+#define __WSM_PUT(buf, val, type, type2, cvt) \
+ do { \
+ if ((buf)->data + sizeof(type) > (buf)->end) \
+ if (wsm_buf_reserve((buf), sizeof(type))) \
+ goto nomem; \
+ *(type2 *)(buf)->data = cvt(val); \
+ (buf)->data += sizeof(type); \
+ } while (0)
+
+#define WSM_PUT8(buf, val) __WSM_PUT(buf, val, u8, u8, (u8))
+#define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __le16, __cpu_to_le16)
+#define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __le32, __cpu_to_le32)
+
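+/* Note: the WSM_GET*() and WSM_PUT*() helpers above jump to local
+ * "underflow:" and "nomem:" labels on error, so every user must
+ * provide them (see e.g. wsm_configuration() below).
+ */
+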
+static void wsm_buf_reset(struct wsm_buf *buf);
+static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size);
+
+static int wsm_cmd_send(struct cw1200_common *priv,
+ struct wsm_buf *buf,
+ void *arg, u16 cmd, long tmo);
+
+#define wsm_cmd_lock(__priv) mutex_lock(&((__priv)->wsm_cmd_mux))
+#define wsm_cmd_unlock(__priv) mutex_unlock(&((__priv)->wsm_cmd_mux))
+
+/* ******************************************************************** */
+/* WSM API implementation */
+
+static int wsm_generic_confirm(struct cw1200_common *priv,
+ void *arg,
+ struct wsm_buf *buf)
+{
+ u32 status = WSM_GET32(buf);
+ if (status != WSM_STATUS_SUCCESS)
+ return -EINVAL;
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+int wsm_configuration(struct cw1200_common *priv, struct wsm_configuration *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT32(buf, arg->dot11MaxTransmitMsduLifeTime);
+ WSM_PUT32(buf, arg->dot11MaxReceiveLifeTime);
+ WSM_PUT32(buf, arg->dot11RtsThreshold);
+
+ /* DPD block. */
+ WSM_PUT16(buf, arg->dpdData_size + 12);
+ WSM_PUT16(buf, 1); /* DPD version */
+ WSM_PUT(buf, arg->dot11StationId, ETH_ALEN);
+ WSM_PUT16(buf, 5); /* DPD flags */
+ WSM_PUT(buf, arg->dpdData, arg->dpdData_size);
+
+ ret = wsm_cmd_send(priv, buf, arg,
+ WSM_CONFIGURATION_REQ_ID, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+static int wsm_configuration_confirm(struct cw1200_common *priv,
+ struct wsm_configuration *arg,
+ struct wsm_buf *buf)
+{
+ int i;
+ int status;
+
+ status = WSM_GET32(buf);
+ if (WARN_ON(status != WSM_STATUS_SUCCESS))
+ return -EINVAL;
+
+ WSM_GET(buf, arg->dot11StationId, ETH_ALEN);
+ arg->dot11FrequencyBandsSupported = WSM_GET8(buf);
+ WSM_SKIP(buf, 1);
+ arg->supportedRateMask = WSM_GET32(buf);
+ for (i = 0; i < 2; ++i) {
+ arg->txPowerRange[i].min_power_level = WSM_GET32(buf);
+ arg->txPowerRange[i].max_power_level = WSM_GET32(buf);
+ arg->txPowerRange[i].stepping = WSM_GET32(buf);
+ }
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+/* ******************************************************************** */
+
+int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ u16 cmd = WSM_RESET_REQ_ID | WSM_TX_LINK_ID(arg->link_id);
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT32(buf, arg->reset_statistics ? 0 : 1);
+ ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_RESET_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+struct wsm_mib {
+ u16 mib_id;
+ void *buf;
+ size_t buf_size;
+};
+
+int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *_buf,
+ size_t buf_size)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ struct wsm_mib mib_buf = {
+ .mib_id = mib_id,
+ .buf = _buf,
+ .buf_size = buf_size,
+ };
+ wsm_cmd_lock(priv);
+
+ WSM_PUT16(buf, mib_id);
+ WSM_PUT16(buf, 0);
+
+ ret = wsm_cmd_send(priv, buf, &mib_buf,
+ WSM_READ_MIB_REQ_ID, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+static int wsm_read_mib_confirm(struct cw1200_common *priv,
+ struct wsm_mib *arg,
+ struct wsm_buf *buf)
+{
+ u16 size;
+ if (WARN_ON(WSM_GET32(buf) != WSM_STATUS_SUCCESS))
+ return -EINVAL;
+
+ if (WARN_ON(WSM_GET16(buf) != arg->mib_id))
+ return -EINVAL;
+
+ size = WSM_GET16(buf);
+ if (size > arg->buf_size)
+ size = arg->buf_size;
+
+ WSM_GET(buf, arg->buf, size);
+ arg->buf_size = size;
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+/* ******************************************************************** */
+
+int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *_buf,
+ size_t buf_size)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ struct wsm_mib mib_buf = {
+ .mib_id = mib_id,
+ .buf = _buf,
+ .buf_size = buf_size,
+ };
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT16(buf, mib_id);
+ WSM_PUT16(buf, buf_size);
+ WSM_PUT(buf, _buf, buf_size);
+
+ ret = wsm_cmd_send(priv, buf, &mib_buf,
+ WSM_WRITE_MIB_REQ_ID, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+static int wsm_write_mib_confirm(struct cw1200_common *priv,
+ struct wsm_mib *arg,
+ struct wsm_buf *buf)
+{
+ int ret;
+
+ ret = wsm_generic_confirm(priv, arg, buf);
+ if (ret)
+ return ret;
+
+ if (arg->mib_id == WSM_MIB_ID_OPERATIONAL_POWER_MODE) {
+ /* OperationalMode: update PM status. */
+ const char *p = arg->buf;
+ cw1200_enable_powersave(priv, (p[0] & 0x0F) ? true : false);
+ }
+ return 0;
+}
+
+/* ******************************************************************** */
+
+int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg)
+{
+ int i;
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ if (arg->num_channels > 48)
+ return -EINVAL;
+
+ if (arg->num_ssids > 2)
+ return -EINVAL;
+
+ if (arg->band > 1)
+ return -EINVAL;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->band);
+ WSM_PUT8(buf, arg->type);
+ WSM_PUT8(buf, arg->flags);
+ WSM_PUT8(buf, arg->max_tx_rate);
+ WSM_PUT32(buf, arg->auto_scan_interval);
+ WSM_PUT8(buf, arg->num_probes);
+ WSM_PUT8(buf, arg->num_channels);
+ WSM_PUT8(buf, arg->num_ssids);
+ WSM_PUT8(buf, arg->probe_delay);
+
+ for (i = 0; i < arg->num_channels; ++i) {
+ WSM_PUT16(buf, arg->ch[i].number);
+ WSM_PUT16(buf, 0);
+ WSM_PUT32(buf, arg->ch[i].min_chan_time);
+ WSM_PUT32(buf, arg->ch[i].max_chan_time);
+ WSM_PUT32(buf, 0);
+ }
+
+ for (i = 0; i < arg->num_ssids; ++i) {
+ WSM_PUT32(buf, arg->ssids[i].length);
+ WSM_PUT(buf, &arg->ssids[i].ssid[0],
+ sizeof(arg->ssids[i].ssid));
+ }
+
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_START_SCAN_REQ_ID, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_stop_scan(struct cw1200_common *priv)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ wsm_cmd_lock(priv);
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_STOP_SCAN_REQ_ID, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+}
+
+
+static int wsm_tx_confirm(struct cw1200_common *priv,
+ struct wsm_buf *buf,
+ int link_id)
+{
+ struct wsm_tx_confirm tx_confirm;
+
+ tx_confirm.packet_id = WSM_GET32(buf);
+ tx_confirm.status = WSM_GET32(buf);
+ tx_confirm.tx_rate = WSM_GET8(buf);
+ tx_confirm.ack_failures = WSM_GET8(buf);
+ tx_confirm.flags = WSM_GET16(buf);
+ tx_confirm.media_delay = WSM_GET32(buf);
+ tx_confirm.tx_queue_delay = WSM_GET32(buf);
+
+ cw1200_tx_confirm_cb(priv, link_id, &tx_confirm);
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static int wsm_multi_tx_confirm(struct cw1200_common *priv,
+ struct wsm_buf *buf, int link_id)
+{
+ int ret;
+ int count;
+ int i;
+
+ count = WSM_GET32(buf);
+ if (WARN_ON(count <= 0))
+ return -EINVAL;
+
+ if (count > 1) {
+ /* We already released one buffer, now for the rest */
+ ret = wsm_release_tx_buffer(priv, count - 1);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ cw1200_bh_wakeup(priv);
+ }
+
+ cw1200_debug_txed_multi(priv, count);
+ for (i = 0; i < count; ++i) {
+ ret = wsm_tx_confirm(priv, buf, link_id);
+ if (ret)
+ return ret;
+ }
+ return ret;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+/* ******************************************************************** */
+
+static int wsm_join_confirm(struct cw1200_common *priv,
+ struct wsm_join_cnf *arg,
+ struct wsm_buf *buf)
+{
+ arg->status = WSM_GET32(buf);
+ if (WARN_ON(arg->status != WSM_STATUS_SUCCESS))
+ return -EINVAL;
+
+ arg->min_power_level = WSM_GET32(buf);
+ arg->max_power_level = WSM_GET32(buf);
+
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+int wsm_join(struct cw1200_common *priv, struct wsm_join *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ struct wsm_join_cnf resp;
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->mode);
+ WSM_PUT8(buf, arg->band);
+ WSM_PUT16(buf, arg->channel_number);
+ WSM_PUT(buf, &arg->bssid[0], sizeof(arg->bssid));
+ WSM_PUT16(buf, arg->atim_window);
+ WSM_PUT8(buf, arg->preamble_type);
+ WSM_PUT8(buf, arg->probe_for_join);
+ WSM_PUT8(buf, arg->dtim_period);
+ WSM_PUT8(buf, arg->flags);
+ WSM_PUT32(buf, arg->ssid_len);
+ WSM_PUT(buf, &arg->ssid[0], sizeof(arg->ssid));
+ WSM_PUT32(buf, arg->beacon_interval);
+ WSM_PUT32(buf, arg->basic_rate_set);
+
+ priv->tx_burst_idx = -1;
+ ret = wsm_cmd_send(priv, buf, &resp,
+ WSM_JOIN_REQ_ID, WSM_CMD_TIMEOUT);
+ /* TODO: Update state based on resp.min|max_power_level */
+
+ priv->join_complete_status = resp.status;
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_bss_params(struct cw1200_common *priv,
+ const struct wsm_set_bss_params *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, (arg->reset_beacon_loss ? 0x1 : 0));
+ WSM_PUT8(buf, arg->beacon_lost_count);
+ WSM_PUT16(buf, arg->aid);
+ WSM_PUT32(buf, arg->operational_rate_set);
+
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_SET_BSS_PARAMS_REQ_ID, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT(buf, arg, sizeof(*arg));
+
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_ADD_KEY_REQ_ID, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_remove_key(struct cw1200_common *priv, const struct wsm_remove_key *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->index);
+ WSM_PUT8(buf, 0);
+ WSM_PUT16(buf, 0);
+
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_REMOVE_KEY_REQ_ID, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_tx_queue_params(struct cw1200_common *priv,
+ const struct wsm_set_tx_queue_params *arg, u8 id)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ static const u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1};
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, queue_id_to_wmm_aci[id]);
+ WSM_PUT8(buf, 0);
+ WSM_PUT8(buf, arg->ackPolicy);
+ WSM_PUT8(buf, 0);
+ WSM_PUT32(buf, arg->maxTransmitLifetime);
+ WSM_PUT16(buf, arg->allowedMediumTime);
+ WSM_PUT16(buf, 0);
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0012, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_edca_params(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ /* Implemented according to specification. */
+
+ WSM_PUT16(buf, arg->params[3].cwmin);
+ WSM_PUT16(buf, arg->params[2].cwmin);
+ WSM_PUT16(buf, arg->params[1].cwmin);
+ WSM_PUT16(buf, arg->params[0].cwmin);
+
+ WSM_PUT16(buf, arg->params[3].cwmax);
+ WSM_PUT16(buf, arg->params[2].cwmax);
+ WSM_PUT16(buf, arg->params[1].cwmax);
+ WSM_PUT16(buf, arg->params[0].cwmax);
+
+ WSM_PUT8(buf, arg->params[3].aifns);
+ WSM_PUT8(buf, arg->params[2].aifns);
+ WSM_PUT8(buf, arg->params[1].aifns);
+ WSM_PUT8(buf, arg->params[0].aifns);
+
+ WSM_PUT16(buf, arg->params[3].txop_limit);
+ WSM_PUT16(buf, arg->params[2].txop_limit);
+ WSM_PUT16(buf, arg->params[1].txop_limit);
+ WSM_PUT16(buf, arg->params[0].txop_limit);
+
+ WSM_PUT32(buf, arg->params[3].max_rx_lifetime);
+ WSM_PUT32(buf, arg->params[2].max_rx_lifetime);
+ WSM_PUT32(buf, arg->params[1].max_rx_lifetime);
+ WSM_PUT32(buf, arg->params[0].max_rx_lifetime);
+
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_EDCA_PARAMS_REQ_ID, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_switch_channel(struct cw1200_common *priv,
+ const struct wsm_switch_channel *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->mode);
+ WSM_PUT8(buf, arg->switch_count);
+ WSM_PUT16(buf, arg->channel_number);
+
+ priv->channel_switch_in_progress = 1;
+
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_SWITCH_CHANNEL_REQ_ID, WSM_CMD_TIMEOUT);
+ if (ret)
+ priv->channel_switch_in_progress = 0;
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ priv->ps_mode_switch_in_progress = 1;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->mode);
+ WSM_PUT8(buf, arg->fast_psm_idle_period);
+ WSM_PUT8(buf, arg->ap_psm_change_period);
+ WSM_PUT8(buf, arg->min_auto_pspoll_period);
+
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_SET_PM_REQ_ID, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->mode);
+ WSM_PUT8(buf, arg->band);
+ WSM_PUT16(buf, arg->channel_number);
+ WSM_PUT32(buf, arg->ct_window);
+ WSM_PUT32(buf, arg->beacon_interval);
+ WSM_PUT8(buf, arg->dtim_period);
+ WSM_PUT8(buf, arg->preamble);
+ WSM_PUT8(buf, arg->probe_delay);
+ WSM_PUT8(buf, arg->ssid_len);
+ WSM_PUT(buf, arg->ssid, sizeof(arg->ssid));
+ WSM_PUT32(buf, arg->basic_rate_set);
+
+ priv->tx_burst_idx = -1;
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_START_REQ_ID, WSM_CMD_START_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_beacon_transmit(struct cw1200_common *priv,
+ const struct wsm_beacon_transmit *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT32(buf, arg->enable_beaconing ? 1 : 0);
+
+ ret = wsm_cmd_send(priv, buf, NULL,
+ WSM_BEACON_TRANSMIT_REQ_ID, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_start_find(struct cw1200_common *priv)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0019, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+}
+
+/* ******************************************************************** */
+
+int wsm_stop_find(struct cw1200_common *priv)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+ ret = wsm_cmd_send(priv, buf, NULL, 0x001A, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+}
+
+/* ******************************************************************** */
+
+int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ u16 cmd = 0x001C | WSM_TX_LINK_ID(arg->link_id);
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT(buf, &arg->mac_addr[0], sizeof(arg->mac_addr));
+ WSM_PUT16(buf, 0);
+
+ ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_update_ie(struct cw1200_common *priv,
+ const struct wsm_update_ie *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT16(buf, arg->what);
+ WSM_PUT16(buf, arg->count);
+ WSM_PUT(buf, arg->ies, arg->length);
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x001B, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+int wsm_set_probe_responder(struct cw1200_common *priv, bool enable)
+{
+ priv->rx_filter.probeResponder = enable;
+ return wsm_set_rx_filter(priv, &priv->rx_filter);
+}
+
+/* ******************************************************************** */
+/* WSM indication events implementation */
+const char * const cw1200_fw_types[] = {
+ "ETF",
+ "WFM",
+ "WSM",
+ "HI test",
+ "Platform test"
+};
+
+static int wsm_startup_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ priv->wsm_caps.input_buffers = WSM_GET16(buf);
+ priv->wsm_caps.input_buffer_size = WSM_GET16(buf);
+ priv->wsm_caps.hw_id = WSM_GET16(buf);
+ priv->wsm_caps.hw_subid = WSM_GET16(buf);
+ priv->wsm_caps.status = WSM_GET16(buf);
+ priv->wsm_caps.fw_cap = WSM_GET16(buf);
+ priv->wsm_caps.fw_type = WSM_GET16(buf);
+ priv->wsm_caps.fw_api = WSM_GET16(buf);
+ priv->wsm_caps.fw_build = WSM_GET16(buf);
+ priv->wsm_caps.fw_ver = WSM_GET16(buf);
+ WSM_GET(buf, priv->wsm_caps.fw_label, sizeof(priv->wsm_caps.fw_label));
+ priv->wsm_caps.fw_label[sizeof(priv->wsm_caps.fw_label) - 1] = 0; /* Do not trust FW too much... */
+
+ if (WARN_ON(priv->wsm_caps.status))
+ return -EINVAL;
+
+ if (WARN_ON(priv->wsm_caps.fw_type > 4))
+ return -EINVAL;
+
+ pr_info("CW1200 WSM init done.\n"
+ " Input buffers: %d x %d bytes\n"
+ " Hardware: %d.%d\n"
+ " %s firmware [%s], ver: %d, build: %d,"
+ " api: %d, cap: 0x%.4X\n",
+ priv->wsm_caps.input_buffers,
+ priv->wsm_caps.input_buffer_size,
+ priv->wsm_caps.hw_id, priv->wsm_caps.hw_subid,
+ cw1200_fw_types[priv->wsm_caps.fw_type],
+ priv->wsm_caps.fw_label, priv->wsm_caps.fw_ver,
+ priv->wsm_caps.fw_build,
+ priv->wsm_caps.fw_api, priv->wsm_caps.fw_cap);
+
+ /* Disable unsupported frequency bands */
+ if (!(priv->wsm_caps.fw_cap & 0x1))
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+ if (!(priv->wsm_caps.fw_cap & 0x2))
+ priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+
+ priv->firmware_ready = 1;
+ wake_up(&priv->wsm_startup_done);
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static int wsm_receive_indication(struct cw1200_common *priv,
+ int link_id,
+ struct wsm_buf *buf,
+ struct sk_buff **skb_p)
+{
+ struct wsm_rx rx;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ __le16 fctl;
+
+ rx.status = WSM_GET32(buf);
+ rx.channel_number = WSM_GET16(buf);
+ rx.rx_rate = WSM_GET8(buf);
+ rx.rcpi_rssi = WSM_GET8(buf);
+ rx.flags = WSM_GET32(buf);
+
+ /* FW workaround: drop probe responses and
+ * beacons when the RSSI is 0.
+ */
+ hdr = (struct ieee80211_hdr *)(*skb_p)->data;
+
+ if (!rx.rcpi_rssi &&
+ (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ return 0;
+
+ /* If no RSSI subscription has been made,
+ * convert RCPI to RSSI here
+ */
+ if (!priv->cqm_use_rssi)
+ rx.rcpi_rssi = rx.rcpi_rssi / 2 - 110;
+
+ fctl = *(__le16 *)buf->data;
+ hdr_len = buf->data - buf->begin;
+ skb_pull(*skb_p, hdr_len);
+ if (!rx.status && ieee80211_is_deauth(fctl)) {
+ if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+ /* Schedule unjoin work */
+ pr_debug("[WSM] Issue unjoin command (RX).\n");
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue,
+ &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ }
+ }
+ cw1200_rx_cb(priv, &rx, link_id, skb_p);
+ if (*skb_p)
+ skb_push(*skb_p, hdr_len);
+
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+static int wsm_event_indication(struct cw1200_common *priv, struct wsm_buf *buf)
+{
+ int first;
+ struct cw1200_wsm_event *event;
+
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
+ /* STA is stopped. */
+ return 0;
+ }
+
+ event = kzalloc(sizeof(struct cw1200_wsm_event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ event->evt.id = WSM_GET32(buf);
+ event->evt.data = WSM_GET32(buf);
+
+ pr_debug("[WSM] Event: %d(%d)\n",
+ event->evt.id, event->evt.data);
+
+ spin_lock(&priv->event_queue_lock);
+ first = list_empty(&priv->event_queue);
+ list_add_tail(&event->link, &priv->event_queue);
+ spin_unlock(&priv->event_queue_lock);
+
+ if (first)
+ queue_work(priv->workqueue, &priv->event_handler);
+
+ return 0;
+
+underflow:
+ kfree(event);
+ return -EINVAL;
+}
+
+static int wsm_channel_switch_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ WARN_ON(WSM_GET32(buf));
+
+ priv->channel_switch_in_progress = 0;
+ wake_up(&priv->channel_switch_done);
+
+ wsm_unlock_tx(priv);
+
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+static int wsm_set_pm_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ /* TODO: Check buf (struct wsm_set_pm_complete) for validity */
+ if (priv->ps_mode_switch_in_progress) {
+ priv->ps_mode_switch_in_progress = 0;
+ wake_up(&priv->ps_mode_switch_done);
+ }
+ return 0;
+}
+
+static int wsm_scan_started(struct cw1200_common *priv, void *arg,
+ struct wsm_buf *buf)
+{
+ u32 status = WSM_GET32(buf);
+ if (status != WSM_STATUS_SUCCESS) {
+ cw1200_scan_failed_cb(priv);
+ return -EINVAL;
+ }
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static int wsm_scan_complete_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ struct wsm_scan_complete arg;
+ arg.status = WSM_GET32(buf);
+ arg.psm = WSM_GET8(buf);
+ arg.num_channels = WSM_GET8(buf);
+ cw1200_scan_complete_cb(priv, &arg);
+
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+static int wsm_join_complete_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ struct wsm_join_complete arg;
+ arg.status = WSM_GET32(buf);
+ pr_debug("[WSM] Join complete indication, status: %d\n", arg.status);
+ cw1200_join_complete_cb(priv, &arg);
+
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+static int wsm_find_complete_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ pr_warn("Implement find_complete_indication\n");
+ return 0;
+}
+
+static int wsm_ba_timeout_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ u32 dummy;
+ u8 tid;
+ u8 dummy2;
+ u8 addr[ETH_ALEN];
+
+ dummy = WSM_GET32(buf);
+ tid = WSM_GET8(buf);
+ dummy2 = WSM_GET8(buf);
+ WSM_GET(buf, addr, ETH_ALEN);
+
+ pr_info("BlockACK timeout, tid %d, addr %pM\n",
+ tid, addr);
+
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+static int wsm_suspend_resume_indication(struct cw1200_common *priv,
+ int link_id, struct wsm_buf *buf)
+{
+ u32 flags;
+ struct wsm_suspend_resume arg;
+
+ flags = WSM_GET32(buf);
+ arg.link_id = link_id;
+ arg.stop = !(flags & 1);
+ arg.multicast = !!(flags & 8);
+ arg.queue = (flags >> 1) & 3;
+
+ cw1200_suspend_resume(priv, &arg);
+
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+
+/* ******************************************************************** */
+/* WSM TX */
+
+static int wsm_cmd_send(struct cw1200_common *priv,
+ struct wsm_buf *buf,
+ void *arg, u16 cmd, long tmo)
+{
+ size_t buf_len = buf->data - buf->begin;
+ int ret;
+
+ /* Don't bother if we're dead. */
+ if (priv->bh_error) {
+ ret = 0;
+ goto done;
+ }
+
+ /* Block until the cmd buffer is completed. Tortuous. */
+ spin_lock(&priv->wsm_cmd.lock);
+ while (!priv->wsm_cmd.done) {
+ spin_unlock(&priv->wsm_cmd.lock);
+ spin_lock(&priv->wsm_cmd.lock);
+ }
+ priv->wsm_cmd.done = 0;
+ spin_unlock(&priv->wsm_cmd.lock);
+
+ if (cmd == WSM_WRITE_MIB_REQ_ID ||
+ cmd == WSM_READ_MIB_REQ_ID)
+ pr_debug("[WSM] >>> 0x%.4X [MIB: 0x%.4X] (%zu)\n",
+ cmd, __le16_to_cpu(((__le16 *)buf->begin)[2]),
+ buf_len);
+ else
+ pr_debug("[WSM] >>> 0x%.4X (%zu)\n", cmd, buf_len);
+
+ /* Due to buggy SPI on CW1200, we need to
+ * pad the message by a few bytes to ensure
+ * that it's completely received.
+ */
+ buf_len += 4;
+
+ /* Fill HI message header */
+ /* BH will add sequence number */
+ ((__le16 *)buf->begin)[0] = __cpu_to_le16(buf_len);
+ ((__le16 *)buf->begin)[1] = __cpu_to_le16(cmd);
+
+ spin_lock(&priv->wsm_cmd.lock);
+ BUG_ON(priv->wsm_cmd.ptr);
+ priv->wsm_cmd.ptr = buf->begin;
+ priv->wsm_cmd.len = buf_len;
+ priv->wsm_cmd.arg = arg;
+ priv->wsm_cmd.cmd = cmd;
+ spin_unlock(&priv->wsm_cmd.lock);
+
+ cw1200_bh_wakeup(priv);
+
+ /* Wait for command completion */
+ ret = wait_event_timeout(priv->wsm_cmd_wq,
+ priv->wsm_cmd.done, tmo);
+
+ if (!ret && !priv->wsm_cmd.done) {
+ spin_lock(&priv->wsm_cmd.lock);
+ priv->wsm_cmd.done = 1;
+ priv->wsm_cmd.ptr = NULL;
+ spin_unlock(&priv->wsm_cmd.lock);
+ if (priv->bh_error) {
+ /* Return ok to help system cleanup */
+ ret = 0;
+ } else {
+ pr_err("CMD req (0x%04x) stuck in firmware, killing BH\n", priv->wsm_cmd.cmd);
+ print_hex_dump_bytes("REQDUMP: ", DUMP_PREFIX_NONE,
+ buf->begin, buf_len);
+ pr_err("Outstanding outgoing frames: %d\n", priv->hw_bufs_used);
+
+ /* Kill BH thread to report the error to the top layer. */
+ atomic_add(1, &priv->bh_term);
+ wake_up(&priv->bh_wq);
+ ret = -ETIMEDOUT;
+ }
+ } else {
+ spin_lock(&priv->wsm_cmd.lock);
+ BUG_ON(!priv->wsm_cmd.done);
+ ret = priv->wsm_cmd.ret;
+ spin_unlock(&priv->wsm_cmd.lock);
+ }
+done:
+ wsm_buf_reset(buf);
+ return ret;
+}
+
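+/* Summary of the command flow above: requests are serialized under
+ * wsm_cmd_mux, the BH thread is woken to pick up wsm_cmd.ptr and send
+ * it, and wsm_handle_rx() matches the confirmation (request ID |
+ * 0x0400), stores the result in wsm_cmd.ret and wakes wsm_cmd_wq.
+ */
+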
+/* ******************************************************************** */
+/* WSM TX port control */
+
+void wsm_lock_tx(struct cw1200_common *priv)
+{
+ wsm_cmd_lock(priv);
+ if (atomic_add_return(1, &priv->tx_lock) == 1) {
+ if (wsm_flush_tx(priv))
+ pr_debug("[WSM] TX is locked.\n");
+ }
+ wsm_cmd_unlock(priv);
+}
+
+void wsm_lock_tx_async(struct cw1200_common *priv)
+{
+ if (atomic_add_return(1, &priv->tx_lock) == 1)
+ pr_debug("[WSM] TX is locked (async).\n");
+}
+
+bool wsm_flush_tx(struct cw1200_common *priv)
+{
+ unsigned long timestamp = jiffies;
+ bool pending = false;
+ long timeout;
+ int i;
+
+ /* Flush must be called with TX lock held. */
+ BUG_ON(!atomic_read(&priv->tx_lock));
+
+ /* First check if we really need to do something.
+ * It is safe to use unprotected access, as hw_bufs_used
+ * can only decrement.
+ */
+ if (!priv->hw_bufs_used)
+ return true;
+
+ if (priv->bh_error) {
+ /* In case of failure do not wait for magic. */
+ pr_err("[WSM] Fatal error occured, will not flush TX.\n");
+ return false;
+ } else {
+ /* Get a timestamp of "oldest" frame */
+ for (i = 0; i < 4; ++i)
+ pending |= cw1200_queue_get_xmit_timestamp(
+ &priv->tx_queue[i],
+ &timestamp, 0xffffffff);
+ /* If there's nothing pending, we're good */
+ if (!pending)
+ return true;
+
+ timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT - jiffies;
+ if (timeout < 0 || wait_event_timeout(priv->bh_evt_wq,
+ !priv->hw_bufs_used,
+ timeout) <= 0) {
+ /* Hmmm... Not good. Frames are stuck in firmware. */
+ priv->bh_error = 1;
+ wiphy_err(priv->hw->wiphy, "[WSM] TX Frames (%d) stuck in firmware, killing BH\n", priv->hw_bufs_used);
+ wake_up(&priv->bh_wq);
+ return false;
+ }
+
+ /* Ok, everything is flushed. */
+ return true;
+ }
+}
+
+void wsm_unlock_tx(struct cw1200_common *priv)
+{
+ int tx_lock;
+ tx_lock = atomic_sub_return(1, &priv->tx_lock);
+ BUG_ON(tx_lock < 0);
+
+ if (tx_lock == 0) {
+ if (!priv->bh_error)
+ cw1200_bh_wakeup(priv);
+ pr_debug("[WSM] TX is unlocked.\n");
+ }
+}
+
+/* ******************************************************************** */
+/* WSM RX */
+
+int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len)
+{
+ struct wsm_buf buf;
+ u32 reason;
+ u32 reg[18];
+ char fname[48];
+ unsigned int i;
+
+ static const char * const reason_str[] = {
+ "undefined instruction",
+ "prefetch abort",
+ "data abort",
+ "unknown error",
+ };
+
+ buf.begin = buf.data = data;
+ buf.end = &buf.begin[len];
+
+ reason = WSM_GET32(&buf);
+ for (i = 0; i < ARRAY_SIZE(reg); ++i)
+ reg[i] = WSM_GET32(&buf);
+ WSM_GET(&buf, fname, sizeof(fname));
+
+ if (reason < 4)
+ wiphy_err(priv->hw->wiphy,
+ "Firmware exception: %s.\n",
+ reason_str[reason]);
+ else
+ wiphy_err(priv->hw->wiphy,
+ "Firmware assert at %.*s, line %d\n",
+ (int) sizeof(fname), fname, reg[1]);
+
+ for (i = 0; i < 12; i += 4)
+ wiphy_err(priv->hw->wiphy,
+ "R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X,\n",
+ i + 0, reg[i + 0], i + 1, reg[i + 1],
+ i + 2, reg[i + 2], i + 3, reg[i + 3]);
+ wiphy_err(priv->hw->wiphy,
+ "R12: 0x%.8X, SP: 0x%.8X, LR: 0x%.8X, PC: 0x%.8X,\n",
+ reg[i + 0], reg[i + 1], reg[i + 2], reg[i + 3]);
+ i += 4;
+ wiphy_err(priv->hw->wiphy,
+ "CPSR: 0x%.8X, SPSR: 0x%.8X\n",
+ reg[i + 0], reg[i + 1]);
+
+ print_hex_dump_bytes("R1: ", DUMP_PREFIX_NONE,
+ fname, sizeof(fname));
+ return 0;
+
+underflow:
+ wiphy_err(priv->hw->wiphy, "Firmware exception.\n");
+ print_hex_dump_bytes("Exception: ", DUMP_PREFIX_NONE,
+ data, len);
+ return -EINVAL;
+}
+
+int wsm_handle_rx(struct cw1200_common *priv, u16 id,
+ struct wsm_hdr *wsm, struct sk_buff **skb_p)
+{
+ int ret = 0;
+ struct wsm_buf wsm_buf;
+ int link_id = (id >> 6) & 0x0F;
+
+ /* Strip link id. */
+ id &= ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX);
+
+ wsm_buf.begin = (u8 *)&wsm[0];
+ wsm_buf.data = (u8 *)&wsm[1];
+ wsm_buf.end = &wsm_buf.begin[__le16_to_cpu(wsm->len)];
+
+ pr_debug("[WSM] <<< 0x%.4X (%td)\n", id,
+ wsm_buf.end - wsm_buf.begin);
+
+ if (id == WSM_TX_CONFIRM_IND_ID) {
+ ret = wsm_tx_confirm(priv, &wsm_buf, link_id);
+ } else if (id == WSM_MULTI_TX_CONFIRM_ID) {
+ ret = wsm_multi_tx_confirm(priv, &wsm_buf, link_id);
+ } else if (id & 0x0400) {
+ void *wsm_arg;
+ u16 wsm_cmd;
+
+ /* Do not trust the FW too much: this protects against a repeated
+ * response and removes a race condition (see above).
+ */
+ spin_lock(&priv->wsm_cmd.lock);
+ wsm_arg = priv->wsm_cmd.arg;
+ wsm_cmd = priv->wsm_cmd.cmd &
+ ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX);
+ priv->wsm_cmd.cmd = 0xFFFF;
+ spin_unlock(&priv->wsm_cmd.lock);
+
+ if (WARN_ON((id & ~0x0400) != wsm_cmd)) {
+ /* Note that any non-zero is a fatal retcode. */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Note that wsm_arg can be NULL in case of timeout in
+ * wsm_cmd_send().
+ */
+
+ switch (id) {
+ case WSM_READ_MIB_RESP_ID:
+ if (wsm_arg)
+ ret = wsm_read_mib_confirm(priv, wsm_arg,
+ &wsm_buf);
+ break;
+ case WSM_WRITE_MIB_RESP_ID:
+ if (wsm_arg)
+ ret = wsm_write_mib_confirm(priv, wsm_arg,
+ &wsm_buf);
+ break;
+ case WSM_START_SCAN_RESP_ID:
+ if (wsm_arg)
+ ret = wsm_scan_started(priv, wsm_arg, &wsm_buf);
+ break;
+ case WSM_CONFIGURATION_RESP_ID:
+ if (wsm_arg)
+ ret = wsm_configuration_confirm(priv, wsm_arg,
+ &wsm_buf);
+ break;
+ case WSM_JOIN_RESP_ID:
+ if (wsm_arg)
+ ret = wsm_join_confirm(priv, wsm_arg, &wsm_buf);
+ break;
+ case WSM_STOP_SCAN_RESP_ID:
+ case WSM_RESET_RESP_ID:
+ case WSM_ADD_KEY_RESP_ID:
+ case WSM_REMOVE_KEY_RESP_ID:
+ case WSM_SET_PM_RESP_ID:
+ case WSM_SET_BSS_PARAMS_RESP_ID:
+ case 0x0412: /* set_tx_queue_params */
+ case WSM_EDCA_PARAMS_RESP_ID:
+ case WSM_SWITCH_CHANNEL_RESP_ID:
+ case WSM_START_RESP_ID:
+ case WSM_BEACON_TRANSMIT_RESP_ID:
+ case 0x0419: /* start_find */
+ case 0x041A: /* stop_find */
+ case 0x041B: /* update_ie */
+ case 0x041C: /* map_link */
+ WARN_ON(wsm_arg != NULL);
+ ret = wsm_generic_confirm(priv, wsm_arg, &wsm_buf);
+ if (ret) {
+ wiphy_warn(priv->hw->wiphy,
+ "wsm_generic_confirm failed for request 0x%04x.\n",
+ id & ~0x0400);
+
+ /* Most often this is 0x0407 or 0x0410; either way it means we're dead. */
+ if (priv->join_status >= CW1200_JOIN_STATUS_JOINING) {
+ wsm_lock_tx(priv);
+ if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ }
+ }
+ break;
+ default:
+ wiphy_warn(priv->hw->wiphy,
+ "Unrecognized confirmation 0x%04x\n",
+ id & ~0x0400);
+ }
+
+ spin_lock(&priv->wsm_cmd.lock);
+ priv->wsm_cmd.ret = ret;
+ priv->wsm_cmd.done = 1;
+ spin_unlock(&priv->wsm_cmd.lock);
+
+ ret = 0; /* An error response from the device should not stop the BH. */
+
+ wake_up(&priv->wsm_cmd_wq);
+ } else if (id & 0x0800) {
+ switch (id) {
+ case WSM_STARTUP_IND_ID:
+ ret = wsm_startup_indication(priv, &wsm_buf);
+ break;
+ case WSM_RECEIVE_IND_ID:
+ ret = wsm_receive_indication(priv, link_id,
+ &wsm_buf, skb_p);
+ break;
+ case 0x0805:
+ ret = wsm_event_indication(priv, &wsm_buf);
+ break;
+ case WSM_SCAN_COMPLETE_IND_ID:
+ ret = wsm_scan_complete_indication(priv, &wsm_buf);
+ break;
+ case 0x0808:
+ ret = wsm_ba_timeout_indication(priv, &wsm_buf);
+ break;
+ case 0x0809:
+ ret = wsm_set_pm_indication(priv, &wsm_buf);
+ break;
+ case 0x080A:
+ ret = wsm_channel_switch_indication(priv, &wsm_buf);
+ break;
+ case 0x080B:
+ ret = wsm_find_complete_indication(priv, &wsm_buf);
+ break;
+ case 0x080C:
+ ret = wsm_suspend_resume_indication(priv,
+ link_id, &wsm_buf);
+ break;
+ case 0x080F:
+ ret = wsm_join_complete_indication(priv, &wsm_buf);
+ break;
+ default:
+ pr_warn("Unrecognised WSM ID %04x\n", id);
+ }
+ } else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+out:
+ return ret;
+}
+
+static bool wsm_handle_tx_data(struct cw1200_common *priv,
+ struct wsm_tx *wsm,
+ const struct ieee80211_tx_info *tx_info,
+ const struct cw1200_txpriv *txpriv,
+ struct cw1200_queue *queue)
+{
+ bool handled = false;
+ const struct ieee80211_hdr *frame =
+ (struct ieee80211_hdr *)&((u8 *)wsm)[txpriv->offset];
+ __le16 fctl = frame->frame_control;
+ enum {
+ do_probe,
+ do_drop,
+ do_wep,
+ do_tx,
+ } action = do_tx;
+
+ switch (priv->mode) {
+ case NL80211_IFTYPE_STATION:
+ if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+ action = do_tx;
+ else if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA)
+ action = do_drop;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!priv->join_status) {
+ action = do_drop;
+ } else if (!(BIT(txpriv->raw_link_id) &
+ (BIT(0) | priv->link_id_map))) {
+ wiphy_warn(priv->hw->wiphy,
+ "A frame with expired link id is dropped.\n");
+ action = do_drop;
+ }
+ if (cw1200_queue_get_generation(wsm->packet_id) >
+ CW1200_MAX_REQUEUE_ATTEMPTS) {
+ /* HACK!!! WSM324 firmware has a tendency to requeue
+ * multicast frames in a loop, causing a performance
+ * drop and high power consumption in the driver.
+ * In this situation it is better to just drop
+ * the problematic frame.
+ */
+ wiphy_warn(priv->hw->wiphy,
+ "Too many attempts to requeue a frame; dropped.\n");
+ action = do_drop;
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (priv->join_status != CW1200_JOIN_STATUS_IBSS)
+ action = do_drop;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ action = do_tx; /* TODO: Test me! */
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ default:
+ action = do_drop;
+ break;
+ }
+
+ if (action == do_tx) {
+ if (ieee80211_is_nullfunc(fctl)) {
+ spin_lock(&priv->bss_loss_lock);
+ if (priv->bss_loss_state) {
+ priv->bss_loss_confirm_id = wsm->packet_id;
+ wsm->queue_id = WSM_QUEUE_VOICE;
+ }
+ spin_unlock(&priv->bss_loss_lock);
+ } else if (ieee80211_is_probe_req(fctl)) {
+ action = do_probe;
+ } else if (ieee80211_is_deauth(fctl) &&
+ priv->mode != NL80211_IFTYPE_AP) {
+ pr_debug("[WSM] Issue unjoin command due to tx deauth.\n");
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue,
+ &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ } else if (ieee80211_has_protected(fctl) &&
+ tx_info->control.hw_key &&
+ tx_info->control.hw_key->keyidx != priv->wep_default_key_id &&
+ (tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ action = do_wep;
+ }
+ }
+
+ switch (action) {
+ case do_probe:
+ /* An interesting FW "feature": the device filters out probe responses.
+ * The easiest way to get them back is to convert the
+ * probe request into a WSM start_scan command.
+ */
+ pr_debug("[WSM] Convert probe request to scan.\n");
+ wsm_lock_tx_async(priv);
+ priv->pending_frame_id = wsm->packet_id;
+ if (queue_delayed_work(priv->workqueue,
+ &priv->scan.probe_work, 0) <= 0)
+ wsm_unlock_tx(priv);
+ handled = true;
+ break;
+ case do_drop:
+ pr_debug("[WSM] Drop frame (0x%.4X).\n", fctl);
+ BUG_ON(cw1200_queue_remove(queue, wsm->packet_id));
+ handled = true;
+ break;
+ case do_wep:
+ pr_debug("[WSM] Issue set_default_wep_key.\n");
+ wsm_lock_tx_async(priv);
+ priv->wep_default_key_id = tx_info->control.hw_key->keyidx;
+ priv->pending_frame_id = wsm->packet_id;
+ if (queue_work(priv->workqueue, &priv->wep_key_work) <= 0)
+ wsm_unlock_tx(priv);
+ handled = true;
+ break;
+ case do_tx:
+ pr_debug("[WSM] Transmit frame.\n");
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ return handled;
+}
+
+static int cw1200_get_prio_queue(struct cw1200_common *priv,
+ u32 link_id_map, int *total)
+{
+ static const int urgent = BIT(CW1200_LINK_ID_AFTER_DTIM) |
+ BIT(CW1200_LINK_ID_UAPSD);
+ struct wsm_edca_queue_params *edca;
+ unsigned score, best = -1;
+ int winner = -1;
+ int queued;
+ int i;
+
+ /* search for a winner using edca params */
+ for (i = 0; i < 4; ++i) {
+ queued = cw1200_queue_get_num_queued(&priv->tx_queue[i],
+ link_id_map);
+ if (!queued)
+ continue;
+ *total += queued;
+ edca = &priv->edca.params[i];
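+ /* Lower score wins: queues with a smaller AIFS + CWmin are
+ * preferred, and a random draw scaled by the CW range acts as a
+ * tie-breaker, loosely mimicking EDCA backoff behavior.
+ */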
+ score = ((edca->aifns + edca->cwmin) << 16) +
+ ((edca->cwmax - edca->cwmin) *
+ (get_random_int() & 0xFFFF));
+ if (score < best && (winner < 0 || i != 3)) {
+ best = score;
+ winner = i;
+ }
+ }
+
+ /* override winner if bursting */
+ if (winner >= 0 && priv->tx_burst_idx >= 0 &&
+ winner != priv->tx_burst_idx &&
+ !cw1200_queue_get_num_queued(
+ &priv->tx_queue[winner],
+ link_id_map & urgent) &&
+ cw1200_queue_get_num_queued(
+ &priv->tx_queue[priv->tx_burst_idx],
+ link_id_map))
+ winner = priv->tx_burst_idx;
+
+ return winner;
+}
+
+static int wsm_get_tx_queue_and_mask(struct cw1200_common *priv,
+ struct cw1200_queue **queue_p,
+ u32 *tx_allowed_mask_p,
+ bool *more)
+{
+ int idx;
+ u32 tx_allowed_mask;
+ int total = 0;
+
+ /* Search for a queue with multicast frames buffered */
+ if (priv->tx_multicast) {
+ tx_allowed_mask = BIT(CW1200_LINK_ID_AFTER_DTIM);
+ idx = cw1200_get_prio_queue(priv,
+ tx_allowed_mask, &total);
+ if (idx >= 0) {
+ *more = total > 1;
+ goto found;
+ }
+ }
+
+ /* Search for unicast traffic */
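+ /* tx_allowed_mask is a bitmap of link ids we may transmit to:
+ * awake stations plus the U-APSD link always qualify; if any
+ * station is asleep, links with a pending PS-Poll are added and
+ * after-DTIM traffic is held back until the next DTIM period.
+ */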
+ tx_allowed_mask = ~priv->sta_asleep_mask;
+ tx_allowed_mask |= BIT(CW1200_LINK_ID_UAPSD);
+ if (priv->sta_asleep_mask) {
+ tx_allowed_mask |= priv->pspoll_mask;
+ tx_allowed_mask &= ~BIT(CW1200_LINK_ID_AFTER_DTIM);
+ } else {
+ tx_allowed_mask |= BIT(CW1200_LINK_ID_AFTER_DTIM);
+ }
+ idx = cw1200_get_prio_queue(priv,
+ tx_allowed_mask, &total);
+ if (idx < 0)
+ return -ENOENT;
+
+found:
+ *queue_p = &priv->tx_queue[idx];
+ *tx_allowed_mask_p = tx_allowed_mask;
+ return 0;
+}
+
+int wsm_get_tx(struct cw1200_common *priv, u8 **data,
+ size_t *tx_len, int *burst)
+{
+ struct wsm_tx *wsm = NULL;
+ struct ieee80211_tx_info *tx_info;
+ struct cw1200_queue *queue = NULL;
+ int queue_num;
+ u32 tx_allowed_mask = 0;
+ const struct cw1200_txpriv *txpriv = NULL;
+ int count = 0;
+
+ /* More is used only for broadcasts. */
+ bool more = false;
+
+ if (priv->wsm_cmd.ptr) { /* CMD request */
+ ++count;
+ spin_lock(&priv->wsm_cmd.lock);
+ BUG_ON(!priv->wsm_cmd.ptr);
+ *data = priv->wsm_cmd.ptr;
+ *tx_len = priv->wsm_cmd.len;
+ *burst = 1;
+ spin_unlock(&priv->wsm_cmd.lock);
+ } else {
+ for (;;) {
+ int ret;
+
+ if (atomic_add_return(0, &priv->tx_lock))
+ break;
+
+ spin_lock_bh(&priv->ps_state_lock);
+
+ ret = wsm_get_tx_queue_and_mask(priv, &queue,
+ &tx_allowed_mask, &more);
+ queue_num = queue - priv->tx_queue;
+
+ if (priv->buffered_multicasts &&
+ (ret || !more) &&
+ (priv->tx_multicast || !priv->sta_asleep_mask)) {
+ priv->buffered_multicasts = false;
+ if (priv->tx_multicast) {
+ priv->tx_multicast = false;
+ queue_work(priv->workqueue,
+ &priv->multicast_stop_work);
+ }
+ }
+
+ spin_unlock_bh(&priv->ps_state_lock);
+
+ if (ret)
+ break;
+
+ if (cw1200_queue_get(queue,
+ tx_allowed_mask,
+ &wsm, &tx_info, &txpriv))
+ continue;
+
+ if (wsm_handle_tx_data(priv, wsm,
+ tx_info, txpriv, queue))
+ continue; /* Handled by WSM */
+
+ wsm->hdr.id &= __cpu_to_le16(
+ ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX));
+ wsm->hdr.id |= cpu_to_le16(
+ WSM_TX_LINK_ID(txpriv->raw_link_id));
+ priv->pspoll_mask &= ~BIT(txpriv->raw_link_id);
+
+ *data = (u8 *)wsm;
+ *tx_len = __le16_to_cpu(wsm->hdr.len);
+
+ /* allow bursting if txop is set */
+ if (priv->edca.params[queue_num].txop_limit)
+ *burst = min(*burst,
+ (int)cw1200_queue_get_num_queued(queue, tx_allowed_mask) + 1);
+ else
+ *burst = 1;
+
+ /* store index of bursting queue */
+ if (*burst > 1)
+ priv->tx_burst_idx = queue_num;
+ else
+ priv->tx_burst_idx = -1;
+
+ if (more) {
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *)
+ &((u8 *)wsm)[txpriv->offset];
+ /* more buffered multicast/broadcast frames
+ * ==> set MoreData flag in IEEE 802.11 header
+ * to inform PS STAs
+ */
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ }
+
+ pr_debug("[WSM] >>> 0x%.4X (%zu) %p %c\n",
+ 0x0004, *tx_len, *data,
+ wsm->more ? 'M' : ' ');
+ ++count;
+ break;
+ }
+ }
+
+ return count;
+}
+
+void wsm_txed(struct cw1200_common *priv, u8 *data)
+{
+ if (data == priv->wsm_cmd.ptr) {
+ spin_lock(&priv->wsm_cmd.lock);
+ priv->wsm_cmd.ptr = NULL;
+ spin_unlock(&priv->wsm_cmd.lock);
+ }
+}
+
+/* ******************************************************************** */
+/* WSM buffer */
+
+void wsm_buf_init(struct wsm_buf *buf)
+{
+ BUG_ON(buf->begin);
+ buf->begin = kmalloc(FWLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
+ buf->end = buf->begin ? &buf->begin[FWLOAD_BLOCK_SIZE] : buf->begin;
+ wsm_buf_reset(buf);
+}
+
+void wsm_buf_deinit(struct wsm_buf *buf)
+{
+ kfree(buf->begin);
+ buf->begin = buf->data = buf->end = NULL;
+}
+
+static void wsm_buf_reset(struct wsm_buf *buf)
+{
+ if (buf->begin) {
+ buf->data = &buf->begin[4];
+ *(u32 *)buf->begin = 0;
+ } else {
+ buf->data = buf->begin;
+ }
+}
+
+static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size)
+{
+ size_t pos = buf->data - buf->begin;
+ size_t size = pos + extra_size;
+
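+ /* Grow in FWLOAD_BLOCK_SIZE steps; presumably this keeps the number
+ * of krealloc() calls low while commands are being appended.
+ */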
+ size = round_up(size, FWLOAD_BLOCK_SIZE);
+
+ buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
+ if (buf->begin) {
+ buf->data = &buf->begin[pos];
+ buf->end = &buf->begin[size];
+ return 0;
+ } else {
+ buf->end = buf->data = buf->begin;
+ return -ENOMEM;
+ }
+}
diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h
new file mode 100644
index 000000000000..7afc613c3706
--- /dev/null
+++ b/drivers/net/wireless/cw1200/wsm.h
@@ -0,0 +1,1870 @@
+/*
+ * WSM host interface (HI) interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on CW1200 UMAC WSM API, which is
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Stewart Mathers <stewart.mathers@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_WSM_H_INCLUDED
+#define CW1200_WSM_H_INCLUDED
+
+#include <linux/spinlock.h>
+
+struct cw1200_common;
+
+/* Bands */
+/* Radio band 2.412 -2.484 GHz. */
+#define WSM_PHY_BAND_2_4G (0)
+
+/* Radio band 4.9375-5.8250 GHz. */
+#define WSM_PHY_BAND_5G (1)
+
+/* Transmit rates */
+/* 1 Mbps ERP-DSSS */
+#define WSM_TRANSMIT_RATE_1 (0)
+
+/* 2 Mbps ERP-DSSS */
+#define WSM_TRANSMIT_RATE_2 (1)
+
+/* 5.5 Mbps ERP-CCK */
+#define WSM_TRANSMIT_RATE_5 (2)
+
+/* 11 Mbps ERP-CCK */
+#define WSM_TRANSMIT_RATE_11 (3)
+
+/* 22 Mbps ERP-PBCC (Not supported) */
+/* #define WSM_TRANSMIT_RATE_22 (4) */
+
+/* 33 Mbps ERP-PBCC (Not supported) */
+/* #define WSM_TRANSMIT_RATE_33 (5) */
+
+/* 6 Mbps (3 Mbps) ERP-OFDM, BPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_6 (6)
+
+/* 9 Mbps (4.5 Mbps) ERP-OFDM, BPSK coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_9 (7)
+
+/* 12 Mbps (6 Mbps) ERP-OFDM, QPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_12 (8)
+
+/* 18 Mbps (9 Mbps) ERP-OFDM, QPSK coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_18 (9)
+
+/* 24 Mbps (12 Mbps) ERP-OFDM, 16QAM coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_24 (10)
+
+/* 36 Mbps (18 Mbps) ERP-OFDM, 16QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_36 (11)
+
+/* 48 Mbps (24 Mbps) ERP-OFDM, 64QAM coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_48 (12)
+
+/* 54 Mbps (27 Mbps) ERP-OFDM, 64QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_54 (13)
+
+/* 6.5 Mbps HT-OFDM, BPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_HT_6 (14)
+
+/* 13 Mbps HT-OFDM, QPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_HT_13 (15)
+
+/* 19.5 Mbps HT-OFDM, QPSK coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_HT_19 (16)
+
+/* 26 Mbps HT-OFDM, 16QAM coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_HT_26 (17)
+
+/* 39 Mbps HT-OFDM, 16QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_HT_39 (18)
+
+/* 52 Mbps HT-OFDM, 64QAM coding rate 2/3 */
+#define WSM_TRANSMIT_RATE_HT_52 (19)
+
+/* 58.5 Mbps HT-OFDM, 64QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_HT_58 (20)
+
+/* 65 Mbps HT-OFDM, 64QAM coding rate 5/6 */
+#define WSM_TRANSMIT_RATE_HT_65 (21)
+
+/* Scan types */
+/* Foreground scan */
+#define WSM_SCAN_TYPE_FOREGROUND (0)
+
+/* Background scan */
+#define WSM_SCAN_TYPE_BACKGROUND (1)
+
+/* Auto scan */
+#define WSM_SCAN_TYPE_AUTO (2)
+
+/* Scan flags */
+/* Forced background scan means that even if the station cannot */
+/* enter power-save mode, it shall still perform the */
+/* background scan. Only valid when ScanType is */
+/* background scan. */
+#define WSM_SCAN_FLAG_FORCE_BACKGROUND (BIT(0))
+
+/* The WLAN device scans one channel at a time so */
+/* that disturbance to the data traffic is minimized. */
+#define WSM_SCAN_FLAG_SPLIT_METHOD (BIT(1))
+
+/* Preamble Type. Long if not set. */
+#define WSM_SCAN_FLAG_SHORT_PREAMBLE (BIT(2))
+
+/* 11n Tx Mode. Mixed if not set. */
+#define WSM_SCAN_FLAG_11N_GREENFIELD (BIT(3))
+
+/* Scan constraints */
+/* Maximum number of channels to be scanned. */
+#define WSM_SCAN_MAX_NUM_OF_CHANNELS (48)
+
+/* The maximum number of SSIDs that the device can scan for. */
+#define WSM_SCAN_MAX_NUM_OF_SSIDS (2)
+
+/* Power management modes */
+/* 802.11 Active mode */
+#define WSM_PSM_ACTIVE (0)
+
+/* 802.11 PS mode */
+#define WSM_PSM_PS BIT(0)
+
+/* Fast Power Save bit */
+#define WSM_PSM_FAST_PS_FLAG BIT(7)
+
+/* Dynamic aka Fast power save */
+#define WSM_PSM_FAST_PS (BIT(0) | BIT(7))
+
+/* Undetermined */
+/* Note : Undetermined status is reported when the */
+/* NULL data frame used to advertise the PM mode to */
+/* the AP at Pre or Post Background Scan is not Acknowledged */
+#define WSM_PSM_UNKNOWN BIT(1)
+
+/* Queue IDs */
+/* best effort/legacy */
+#define WSM_QUEUE_BEST_EFFORT (0)
+
+/* background */
+#define WSM_QUEUE_BACKGROUND (1)
+
+/* video */
+#define WSM_QUEUE_VIDEO (2)
+
+/* voice */
+#define WSM_QUEUE_VOICE (3)
+
+/* HT TX parameters */
+/* Non-HT */
+#define WSM_HT_TX_NON_HT (0)
+
+/* Mixed format */
+#define WSM_HT_TX_MIXED (1)
+
+/* Greenfield format */
+#define WSM_HT_TX_GREENFIELD (2)
+
+/* STBC allowed */
+#define WSM_HT_TX_STBC (BIT(7))
+
+/* EPTA priority flags for BT Coex */
+/* default epta priority */
+#define WSM_EPTA_PRIORITY_DEFAULT 4
+/* use for normal data */
+#define WSM_EPTA_PRIORITY_DATA 4
+/* use for connect/disconnect/roaming*/
+#define WSM_EPTA_PRIORITY_MGT 5
+/* use for action frames */
+#define WSM_EPTA_PRIORITY_ACTION 5
+/* use for AC_VI data */
+#define WSM_EPTA_PRIORITY_VIDEO 5
+/* use for AC_VO data */
+#define WSM_EPTA_PRIORITY_VOICE 6
+/* use for EAPOL exchange */
+#define WSM_EPTA_PRIORITY_EAPOL 7
+
+/* TX status */
+/* Frame was sent aggregated */
+/* Only valid for WSM_SUCCESS status. */
+#define WSM_TX_STATUS_AGGREGATION (BIT(0))
+
+/* Host should requeue this frame later. */
+/* Valid only when status is WSM_REQUEUE. */
+#define WSM_TX_STATUS_REQUEUE (BIT(1))
+
+/* Normal Ack */
+#define WSM_TX_STATUS_NORMAL_ACK (0<<2)
+
+/* No Ack */
+#define WSM_TX_STATUS_NO_ACK (1<<2)
+
+/* No explicit acknowledgement */
+#define WSM_TX_STATUS_NO_EXPLICIT_ACK (2<<2)
+
+/* Block Ack */
+/* Only valid for WSM_SUCCESS status. */
+#define WSM_TX_STATUS_BLOCK_ACK (3<<2)
+
+/* RX status */
+/* Unencrypted */
+#define WSM_RX_STATUS_UNENCRYPTED (0<<0)
+
+/* WEP */
+#define WSM_RX_STATUS_WEP (1<<0)
+
+/* TKIP */
+#define WSM_RX_STATUS_TKIP (2<<0)
+
+/* AES */
+#define WSM_RX_STATUS_AES (3<<0)
+
+/* WAPI */
+#define WSM_RX_STATUS_WAPI (4<<0)
+
+/* Macro to fetch encryption subfield. */
+#define WSM_RX_STATUS_ENCRYPTION(status) ((status) & 0x07)
+
+/* Frame was part of an aggregation */
+#define WSM_RX_STATUS_AGGREGATE (BIT(3))
+
+/* Frame was first in the aggregation */
+#define WSM_RX_STATUS_AGGREGATE_FIRST (BIT(4))
+
+/* Frame was last in the aggregation */
+#define WSM_RX_STATUS_AGGREGATE_LAST (BIT(5))
+
+/* Indicates a defragmented frame */
+#define WSM_RX_STATUS_DEFRAGMENTED (BIT(6))
+
+/* Indicates a Beacon frame */
+#define WSM_RX_STATUS_BEACON (BIT(7))
+
+/* Indicates the STA bit is set in the beacon TIM field */
+#define WSM_RX_STATUS_TIM (BIT(8))
+
+/* Indicates Beacon frame's virtual bitmap contains multicast bit */
+#define WSM_RX_STATUS_MULTICAST (BIT(9))
+
+/* Indicates frame contains a matching SSID */
+#define WSM_RX_STATUS_MATCHING_SSID (BIT(10))
+
+/* Indicates frame contains a matching BSSI */
+#define WSM_RX_STATUS_MATCHING_BSSI (BIT(11))
+
+/* Indicates More bit set in Framectl field */
+#define WSM_RX_STATUS_MORE_DATA (BIT(12))
+
+/* Indicates frame received during a measurement process */
+#define WSM_RX_STATUS_MEASUREMENT (BIT(13))
+
+/* Indicates frame received as an HT packet */
+#define WSM_RX_STATUS_HT (BIT(14))
+
+/* Indicates frame received with STBC */
+#define WSM_RX_STATUS_STBC (BIT(15))
+
+/* Indicates Address 1 field matches dot11StationId */
+#define WSM_RX_STATUS_ADDRESS1 (BIT(16))
+
+/* Indicates Group address present in the Address 1 field */
+#define WSM_RX_STATUS_GROUP (BIT(17))
+
+/* Indicates Broadcast address present in the Address 1 field */
+#define WSM_RX_STATUS_BROADCAST (BIT(18))
+
+/* Indicates group key used with encrypted frames */
+#define WSM_RX_STATUS_GROUP_KEY (BIT(19))
+
+/* Macro to fetch encryption key index. */
+#define WSM_RX_STATUS_KEY_IDX(status) (((status >> 20)) & 0x0F)
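+
+/* Illustration (not from the spec): for a status word of 0x00500002,
+ * WSM_RX_STATUS_ENCRYPTION() yields 2 (TKIP) and
+ * WSM_RX_STATUS_KEY_IDX() yields 5.
+ */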
+
+/* Indicates TSF inclusion after 802.11 frame body */
+#define WSM_RX_STATUS_TSF_INCLUDED (BIT(24))
+
+/* Frame Control field starts at Frame offset + 2 */
+#define WSM_TX_2BYTES_SHIFT (BIT(7))
+
+/* Join mode */
+/* IBSS */
+#define WSM_JOIN_MODE_IBSS (0)
+
+/* BSS */
+#define WSM_JOIN_MODE_BSS (1)
+
+/* PLCP preamble type */
+/* For long preamble */
+#define WSM_JOIN_PREAMBLE_LONG (0)
+
+/* For short preamble (Long for 1Mbps) */
+#define WSM_JOIN_PREAMBLE_SHORT (1)
+
+/* For short preamble (Long for 1 and 2Mbps) */
+#define WSM_JOIN_PREAMBLE_SHORT_2 (2)
+
+/* Join flags */
+/* Unsynchronized */
+#define WSM_JOIN_FLAGS_UNSYNCRONIZED BIT(0)
+/* The BSS owner is a P2P GO */
+#define WSM_JOIN_FLAGS_P2P_GO BIT(1)
+/* Force to join BSS with the BSSID and the
+ * SSID specified without waiting for beacons. The
+ * ProbeForJoin parameter is ignored.
+ */
+#define WSM_JOIN_FLAGS_FORCE BIT(2)
+/* Give probe request/response higher
+ * priority over the BT traffic
+ */
+#define WSM_JOIN_FLAGS_PRIO BIT(3)
+/* Issue immediate join confirmation and use
+ * join complete to notify about completion
+ */
+#define WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND BIT(5)
+
+/* Key types */
+#define WSM_KEY_TYPE_WEP_DEFAULT (0)
+#define WSM_KEY_TYPE_WEP_PAIRWISE (1)
+#define WSM_KEY_TYPE_TKIP_GROUP (2)
+#define WSM_KEY_TYPE_TKIP_PAIRWISE (3)
+#define WSM_KEY_TYPE_AES_GROUP (4)
+#define WSM_KEY_TYPE_AES_PAIRWISE (5)
+#define WSM_KEY_TYPE_WAPI_GROUP (6)
+#define WSM_KEY_TYPE_WAPI_PAIRWISE (7)
+
+/* Key indexes */
+#define WSM_KEY_MAX_INDEX (10)
+
+/* ACK policy */
+#define WSM_ACK_POLICY_NORMAL (0)
+#define WSM_ACK_POLICY_NO_ACK (1)
+
+/* Start modes */
+#define WSM_START_MODE_AP (0) /* Mini AP */
+#define WSM_START_MODE_P2P_GO (1) /* P2P GO */
+#define WSM_START_MODE_P2P_DEV (2) /* P2P device */
+
+/* SetAssociationMode MIB flags */
+#define WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE (BIT(0))
+#define WSM_ASSOCIATION_MODE_USE_HT_MODE (BIT(1))
+#define WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET (BIT(2))
+#define WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING (BIT(3))
+#define WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES (BIT(4))
+
+/* RcpiRssiThreshold MIB flags */
+#define WSM_RCPI_RSSI_THRESHOLD_ENABLE (BIT(0))
+#define WSM_RCPI_RSSI_USE_RSSI (BIT(1))
+#define WSM_RCPI_RSSI_DONT_USE_UPPER (BIT(2))
+#define WSM_RCPI_RSSI_DONT_USE_LOWER (BIT(3))
+
+/* Update-ie constants */
+#define WSM_UPDATE_IE_BEACON (BIT(0))
+#define WSM_UPDATE_IE_PROBE_RESP (BIT(1))
+#define WSM_UPDATE_IE_PROBE_REQ (BIT(2))
+
+/* WSM events */
+/* Error */
+#define WSM_EVENT_ERROR (0)
+
+/* BSS lost */
+#define WSM_EVENT_BSS_LOST (1)
+
+/* BSS regained */
+#define WSM_EVENT_BSS_REGAINED (2)
+
+/* Radar detected */
+#define WSM_EVENT_RADAR_DETECTED (3)
+
+/* RCPI or RSSI threshold triggered */
+#define WSM_EVENT_RCPI_RSSI (4)
+
+/* BT inactive */
+#define WSM_EVENT_BT_INACTIVE (5)
+
+/* BT active */
+#define WSM_EVENT_BT_ACTIVE (6)
+
+/* MIB IDs */
+/* 4.1 dot11StationId */
+#define WSM_MIB_ID_DOT11_STATION_ID 0x0000
+
+/* 4.2 dot11MaxtransmitMsduLifeTime */
+#define WSM_MIB_ID_DOT11_MAX_TRANSMIT_LIFTIME 0x0001
+
+/* 4.3 dot11MaxReceiveLifeTime */
+#define WSM_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME 0x0002
+
+/* 4.4 dot11SlotTime */
+#define WSM_MIB_ID_DOT11_SLOT_TIME 0x0003
+
+/* 4.5 dot11GroupAddressesTable */
+#define WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE 0x0004
+#define WSM_MAX_GRP_ADDRTABLE_ENTRIES 8
+
+/* 4.6 dot11WepDefaultKeyId */
+#define WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID 0x0005
+
+/* 4.7 dot11CurrentTxPowerLevel */
+#define WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL 0x0006
+
+/* 4.8 dot11RTSThreshold */
+#define WSM_MIB_ID_DOT11_RTS_THRESHOLD 0x0007
+
+/* 4.9 NonErpProtection */
+#define WSM_MIB_ID_NON_ERP_PROTECTION 0x1000
+
+/* 4.10 ArpIpAddressesTable */
+#define WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE 0x1001
+#define WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES 1
+
+/* 4.11 TemplateFrame */
+#define WSM_MIB_ID_TEMPLATE_FRAME 0x1002
+
+/* 4.12 RxFilter */
+#define WSM_MIB_ID_RX_FILTER 0x1003
+
+/* 4.13 BeaconFilterTable */
+#define WSM_MIB_ID_BEACON_FILTER_TABLE 0x1004
+
+/* 4.14 BeaconFilterEnable */
+#define WSM_MIB_ID_BEACON_FILTER_ENABLE 0x1005
+
+/* 4.15 OperationalPowerMode */
+#define WSM_MIB_ID_OPERATIONAL_POWER_MODE 0x1006
+
+/* 4.16 BeaconWakeUpPeriod */
+#define WSM_MIB_ID_BEACON_WAKEUP_PERIOD 0x1007
+
+/* 4.17 RcpiRssiThreshold */
+#define WSM_MIB_ID_RCPI_RSSI_THRESHOLD 0x1009
+
+/* 4.18 StatisticsTable */
+#define WSM_MIB_ID_STATISTICS_TABLE 0x100A
+
+/* 4.19 IbssPsConfig */
+#define WSM_MIB_ID_IBSS_PS_CONFIG 0x100B
+
+/* 4.20 CountersTable */
+#define WSM_MIB_ID_COUNTERS_TABLE 0x100C
+
+/* 4.21 BlockAckPolicy */
+#define WSM_MIB_ID_BLOCK_ACK_POLICY 0x100E
+
+/* 4.22 OverrideInternalTxRate */
+#define WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE 0x100F
+
+/* 4.23 SetAssociationMode */
+#define WSM_MIB_ID_SET_ASSOCIATION_MODE 0x1010
+
+/* 4.24 UpdateEptaConfigData */
+#define WSM_MIB_ID_UPDATE_EPTA_CONFIG_DATA 0x1011
+
+/* 4.25 SelectCcaMethod */
+#define WSM_MIB_ID_SELECT_CCA_METHOD 0x1012
+
+/* 4.26 SetUapsdInformation */
+#define WSM_MIB_ID_SET_UAPSD_INFORMATION 0x1013
+
+/* 4.27 SetAutoCalibrationMode WBF00004073 */
+#define WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE 0x1015
+
+/* 4.28 SetTxRateRetryPolicy */
+#define WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY 0x1016
+
+/* 4.29 SetHostMessageTypeFilter */
+#define WSM_MIB_ID_SET_HOST_MSG_TYPE_FILTER 0x1017
+
+/* 4.30 P2PFindInfo */
+#define WSM_MIB_ID_P2P_FIND_INFO 0x1018
+
+/* 4.31 P2PPsModeInfo */
+#define WSM_MIB_ID_P2P_PS_MODE_INFO 0x1019
+
+/* 4.32 SetEtherTypeDataFrameFilter */
+#define WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER 0x101A
+
+/* 4.33 SetUDPPortDataFrameFilter */
+#define WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER 0x101B
+
+/* 4.34 SetMagicDataFrameFilter */
+#define WSM_MIB_ID_SET_MAGIC_DATAFRAME_FILTER 0x101C
+
+/* 4.35 P2PDeviceInfo */
+#define WSM_MIB_ID_P2P_DEVICE_INFO 0x101D
+
+/* 4.36 SetWCDMABand */
+#define WSM_MIB_ID_SET_WCDMA_BAND 0x101E
+
+/* 4.37 GroupTxSequenceCounter */
+#define WSM_MIB_ID_GRP_SEQ_COUNTER 0x101F
+
+/* 4.38 ProtectedMgmtPolicy */
+#define WSM_MIB_ID_PROTECTED_MGMT_POLICY 0x1020
+
+/* 4.39 SetHtProtection */
+#define WSM_MIB_ID_SET_HT_PROTECTION 0x1021
+
+/* 4.40 GPIO Command */
+#define WSM_MIB_ID_GPIO_COMMAND 0x1022
+
+/* 4.41 TSF Counter Value */
+#define WSM_MIB_ID_TSF_COUNTER 0x1023
+
+/* Test Purposes Only */
+#define WSM_MIB_ID_BLOCK_ACK_INFO 0x100D
+
+/* 4.42 UseMultiTxConfMessage */
+#define WSM_MIB_USE_MULTI_TX_CONF 0x1024
+
+/* 4.43 Keep-alive period */
+#define WSM_MIB_ID_KEEP_ALIVE_PERIOD 0x1025
+
+/* 4.44 Disable BSSID filter */
+#define WSM_MIB_ID_DISABLE_BSSID_FILTER 0x1026
+
+/* Frame template types */
+#define WSM_FRAME_TYPE_PROBE_REQUEST (0)
+#define WSM_FRAME_TYPE_BEACON (1)
+#define WSM_FRAME_TYPE_NULL (2)
+#define WSM_FRAME_TYPE_QOS_NULL (3)
+#define WSM_FRAME_TYPE_PS_POLL (4)
+#define WSM_FRAME_TYPE_PROBE_RESPONSE (5)
+
+#define WSM_FRAME_GREENFIELD (0x80) /* See 4.11 */
+
+/* Status */
+/* The WSM firmware has completed a request */
+/* successfully. */
+#define WSM_STATUS_SUCCESS (0)
+
+/* This is a generic failure code if other error codes do */
+/* not apply. */
+#define WSM_STATUS_FAILURE (1)
+
+/* A request contains one or more invalid parameters. */
+#define WSM_INVALID_PARAMETER (2)
+
+/* The request cannot be performed because the device is in */
+/* an inappropriate mode. */
+#define WSM_ACCESS_DENIED (3)
+
+/* The frame received includes a decryption error. */
+#define WSM_STATUS_DECRYPTFAILURE (4)
+
+/* A MIC failure is detected in the received packets. */
+#define WSM_STATUS_MICFAILURE (5)
+
+/* The transmit request failed due to retry limit being */
+/* exceeded. */
+#define WSM_STATUS_RETRY_EXCEEDED (6)
+
+/* The transmit request failed due to MSDU life time */
+/* being exceeded. */
+#define WSM_STATUS_TX_LIFETIME_EXCEEDED (7)
+
+/* The link to the AP is lost. */
+#define WSM_STATUS_LINK_LOST (8)
+
+/* No key was found for the encrypted frame */
+#define WSM_STATUS_NO_KEY_FOUND (9)
+
+/* Jammer was detected when transmitting this frame */
+#define WSM_STATUS_JAMMER_DETECTED (10)
+
+/* The message should be requeued later. */
+/* This is applicable only to Transmit */
+#define WSM_REQUEUE (11)
+
+/* Advanced filtering options */
+#define WSM_MAX_FILTER_ELEMENTS (4)
+
+#define WSM_FILTER_ACTION_IGNORE (0)
+#define WSM_FILTER_ACTION_FILTER_IN (1)
+#define WSM_FILTER_ACTION_FILTER_OUT (2)
+
+#define WSM_FILTER_PORT_TYPE_DST (0)
+#define WSM_FILTER_PORT_TYPE_SRC (1)
+
+/* Actual header of WSM messages */
+struct wsm_hdr {
+ __le16 len;
+ __le16 id;
+};
+
+#define WSM_TX_SEQ_MAX (7)
+#define WSM_TX_SEQ(seq) \
+ ((seq & WSM_TX_SEQ_MAX) << 13)
+#define WSM_TX_LINK_ID_MAX (0x0F)
+#define WSM_TX_LINK_ID(link_id) \
+ ((link_id & WSM_TX_LINK_ID_MAX) << 6)
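+
+/* Packing of the 16-bit id field as implied by the macros above and by
+ * wsm_handle_rx(): the command/message number sits in the low bits,
+ * the link id occupies bits 6..9 and the tx sequence bits 13..15,
+ * while bit 10 (0x0400) flags a confirmation and bit 11 (0x0800) an
+ * indication. For example (illustrative only):
+ *
+ *   WSM_CONFIGURATION_REQ_ID | WSM_TX_LINK_ID(2)
+ *     == 0x0009 | (2 << 6) == 0x0089
+ */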
+
+#define MAX_BEACON_SKIP_TIME_MS 1000
+
+#define WSM_CMD_LAST_CHANCE_TIMEOUT (HZ * 3 / 2)
+
+/* ******************************************************************** */
+/* WSM capability */
+
+#define WSM_STARTUP_IND_ID 0x0801
+
+struct wsm_startup_ind {
+ u16 input_buffers;
+ u16 input_buffer_size;
+ u16 status;
+ u16 hw_id;
+ u16 hw_subid;
+ u16 fw_cap;
+ u16 fw_type;
+ u16 fw_api;
+ u16 fw_build;
+ u16 fw_ver;
+ char fw_label[128];
+ u32 config[4];
+};
+
+/* ******************************************************************** */
+/* WSM commands */
+
+/* 3.1 */
+#define WSM_CONFIGURATION_REQ_ID 0x0009
+#define WSM_CONFIGURATION_RESP_ID 0x0409
+
+struct wsm_tx_power_range {
+ int min_power_level;
+ int max_power_level;
+ u32 stepping;
+};
+
+struct wsm_configuration {
+ /* [in] */ u32 dot11MaxTransmitMsduLifeTime;
+ /* [in] */ u32 dot11MaxReceiveLifeTime;
+ /* [in] */ u32 dot11RtsThreshold;
+ /* [in, out] */ u8 *dot11StationId;
+ /* [in] */ const void *dpdData;
+ /* [in] */ size_t dpdData_size;
+ /* [out] */ u8 dot11FrequencyBandsSupported;
+ /* [out] */ u32 supportedRateMask;
+ /* [out] */ struct wsm_tx_power_range txPowerRange[2];
+};
+
+int wsm_configuration(struct cw1200_common *priv,
+ struct wsm_configuration *arg);
+
+/* 3.3 */
+#define WSM_RESET_REQ_ID 0x000A
+#define WSM_RESET_RESP_ID 0x040A
+struct wsm_reset {
+ /* [in] */ int link_id;
+ /* [in] */ bool reset_statistics;
+};
+
+int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg);
+
+/* 3.5 */
+#define WSM_READ_MIB_REQ_ID 0x0005
+#define WSM_READ_MIB_RESP_ID 0x0405
+int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *buf,
+ size_t buf_size);
+
+/* 3.7 */
+#define WSM_WRITE_MIB_REQ_ID 0x0006
+#define WSM_WRITE_MIB_RESP_ID 0x0406
+int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *buf,
+ size_t buf_size);
+
+/* 3.9 */
+#define WSM_START_SCAN_REQ_ID 0x0007
+#define WSM_START_SCAN_RESP_ID 0x0407
+
+struct wsm_ssid {
+ u8 ssid[32];
+ u32 length;
+};
+
+struct wsm_scan_ch {
+ u16 number;
+ u32 min_chan_time;
+ u32 max_chan_time;
+ u32 tx_power_level;
+};
+
+struct wsm_scan {
+ /* WSM_PHY_BAND_... */
+ u8 band;
+
+ /* WSM_SCAN_TYPE_... */
+ u8 type;
+
+ /* WSM_SCAN_FLAG_... */
+ u8 flags;
+
+ /* WSM_TRANSMIT_RATE_... */
+ u8 max_tx_rate;
+
+ /* Interval period in TUs at which the device shall re-execute */
+ /* the requested scan. The maximum value supported by the device */
+ /* is 256s. */
+ u32 auto_scan_interval;
+
+ /* Number of probe requests (per SSID) sent to one (1) */
+ /* channel. Zero (0) means that none are sent, i.e. a */
+ /* passive scan is to be done. A value greater than */
+ /* zero (0) means that an active scan is to be done. */
+ u32 num_probes;
+
+ /* Number of channels to be scanned. */
+ /* Maximum value is WSM_SCAN_MAX_NUM_OF_CHANNELS. */
+ u8 num_channels;
+
+ /* Number of SSIDs provided in the scan command (this */
+ /* is zero (0) for a broadcast scan). */
+ /* The maximum number of SSIDs is WSM_SCAN_MAX_NUM_OF_SSIDS. */
+ u8 num_ssids;
+
+ /* The delay time (in microseconds) */
+ /* before sending a probe request. */
+ u8 probe_delay;
+
+ /* SSIDs to be scanned [numOfSSIDs]; */
+ struct wsm_ssid *ssids;
+
+ /* Channels to be scanned [numOfChannels]; */
+ struct wsm_scan_ch *ch;
+};
+
+int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg);
+
+/* 3.11 */
+#define WSM_STOP_SCAN_REQ_ID 0x0008
+#define WSM_STOP_SCAN_RESP_ID 0x0408
+int wsm_stop_scan(struct cw1200_common *priv);
+
+/* 3.13 */
+#define WSM_SCAN_COMPLETE_IND_ID 0x0806
+struct wsm_scan_complete {
+ /* WSM_STATUS_... */
+ u32 status;
+
+ /* WSM_PSM_... */
+ u8 psm;
+
+ /* Number of channels that the scan operation completed. */
+ u8 num_channels;
+};
+
+/* 3.14 */
+#define WSM_TX_CONFIRM_IND_ID 0x0404
+#define WSM_MULTI_TX_CONFIRM_ID 0x041E
+
+struct wsm_tx_confirm {
+ /* Packet identifier used in wsm_tx. */
+ u32 packet_id;
+
+ /* WSM_STATUS_... */
+ u32 status;
+
+ /* WSM_TRANSMIT_RATE_... */
+ u8 tx_rate;
+
+ /* The number of times the frame was transmitted */
+ /* without receiving an acknowledgement. */
+ u8 ack_failures;
+
+ /* WSM_TX_STATUS_... */
+ u16 flags;
+
+ /* The total time in microseconds that the frame spent in */
+ /* the WLAN device before transmission was completed. */
+ u32 media_delay;
+
+ /* The total time in microseconds that the frame spent in */
+ /* the WLAN device before transmission was started. */
+ u32 tx_queue_delay;
+};
+
+/* 3.15 */
+typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv,
+ struct wsm_tx_confirm *arg);
+
+/* Note that the ideology of the wsm_tx struct is different from the rest of
+ * the WSM API: it is /not/ a caller-adapted struct to be used as an input
+ * argument for a WSM call, but a prepared bytestream to be sent to firmware.
+ * It is filled partly in cw1200_tx and partly in the low-level WSM code.
+ * Please pay attention once again: the ideology is different.
+ *
+ * Legend:
+ * - [in]: cw1200_tx must fill this field.
+ * - [wsm]: the field is filled by low-level WSM.
+ */
+struct wsm_tx {
+ /* common WSM header */
+ struct wsm_hdr hdr;
+
+ /* Packet identifier that is meant to be used in the completion. */
+ u32 packet_id; /* Note this is actually a cookie */
+
+ /* WSM_TRANSMIT_RATE_... */
+ u8 max_tx_rate;
+
+ /* WSM_QUEUE_... */
+ u8 queue_id;
+
+ /* True: another packet is pending on the host for transmission. */
+ u8 more;
+
+ /* Bit 0 = 0 - Start expiry time from first Tx attempt (default) */
+ /* Bit 0 = 1 - Start expiry time from receipt of Tx Request */
+ /* Bits 3:1 - PTA Priority */
+ /* Bits 6:4 - Tx Rate Retry Policy */
+ /* Bit 7 - Reserved */
+ u8 flags;
+
+ /* Should be 0. */
+ u32 reserved;
+
+ /* The elapsed time in TUs, after the initial transmission */
+ /* of an MSDU, after which further attempts to transmit */
+ /* the MSDU shall be terminated. Overrides the global */
+ /* dot11MaxTransmitMsduLifeTime setting [optional] */
+ /* Device will set the default value if this is 0. */
+ u32 expire_time;
+
+ /* WSM_HT_TX_... */
+ __le32 ht_tx_parameters;
+} __packed;
+
+/* = sizeof(generic hi hdr) + sizeof(wsm hdr) + sizeof(alignment) */
+#define WSM_TX_EXTRA_HEADROOM (28)
+
+/* 3.16 */
+#define WSM_RECEIVE_IND_ID 0x0804
+
+struct wsm_rx {
+ /* WSM_STATUS_... */
+ u32 status;
+
+ /* Specifies the channel of the received packet. */
+ u16 channel_number;
+
+ /* WSM_TRANSMIT_RATE_... */
+ u8 rx_rate;
+
+ /* This value is expressed in signed Q8.0 format for */
+ /* RSSI and unsigned Q7.1 format for RCPI. */
+ u8 rcpi_rssi;
+
+ /* WSM_RX_STATUS_... */
+ u32 flags;
+};
+
+/* = sizeof(generic hi hdr) + sizeof(wsm hdr) */
+#define WSM_RX_EXTRA_HEADROOM (16)
+
+typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg,
+ struct sk_buff **skb_p);
+
+/* 3.17 */
+struct wsm_event {
+ /* WSM_STATUS_... */
+ /* [out] */ u32 id;
+
+ /* Indication parameters. */
+ /* For error indication, this shall be a 32-bit WSM status. */
+ /* For RCPI or RSSI indication, this should be an 8-bit */
+ /* RCPI or RSSI value. */
+ /* [out] */ u32 data;
+};
+
+struct cw1200_wsm_event {
+ struct list_head link;
+ struct wsm_event evt;
+};
+
+/* 3.18 - 3.22 */
+/* Measurement. Skipped for now. Irrelevant. */
+
+typedef void (*wsm_event_cb) (struct cw1200_common *priv,
+ struct wsm_event *arg);
+
+/* 3.23 */
+#define WSM_JOIN_REQ_ID 0x000B
+#define WSM_JOIN_RESP_ID 0x040B
+
+struct wsm_join {
+ /* WSM_JOIN_MODE_... */
+ u8 mode;
+
+ /* WSM_PHY_BAND_... */
+ u8 band;
+
+ /* Specifies the channel number to join. The channel */
+ /* number will be mapped to an actual frequency */
+ /* according to the band */
+ u16 channel_number;
+
+ /* Specifies the BSSID of the BSS or IBSS to be joined */
+ /* or the IBSS to be started. */
+ u8 bssid[6];
+
+ /* ATIM window of IBSS */
+ /* When ATIM window is zero the initiated IBSS does */
+ /* not support power saving. */
+ u16 atim_window;
+
+ /* WSM_JOIN_PREAMBLE_... */
+ u8 preamble_type;
+
+ /* Specifies if a probe request should be sent with the */
+ /* specified SSID when joining the network. */
+ u8 probe_for_join;
+
+ /* DTIM Period (In multiples of beacon interval) */
+ u8 dtim_period;
+
+ /* WSM_JOIN_FLAGS_... */
+ u8 flags;
+
+ /* Length of the SSID */
+ u32 ssid_len;
+
+ /* Specifies the SSID of the IBSS to join or start */
+ u8 ssid[32];
+
+ /* Specifies the time between TBTTs in TUs */
+ u32 beacon_interval;
+
+ /* A bit mask that defines the BSS basic rate set. */
+ u32 basic_rate_set;
+};
+
+struct wsm_join_cnf {
+ u32 status;
+
+ /* Minimum transmission power level in units of 0.1dBm */
+ u32 min_power_level;
+
+ /* Maximum transmission power level in units of 0.1dBm */
+ u32 max_power_level;
+};
+
+int wsm_join(struct cw1200_common *priv, struct wsm_join *arg);
+
+/* 3.24 */
+struct wsm_join_complete {
+ /* WSM_STATUS_... */
+ u32 status;
+};
+
+/* 3.25 */
+#define WSM_SET_PM_REQ_ID 0x0010
+#define WSM_SET_PM_RESP_ID 0x0410
+struct wsm_set_pm {
+ /* WSM_PSM_... */
+ u8 mode;
+
+ /* in unit of 500us; 0 to use default */
+ u8 fast_psm_idle_period;
+
+ /* in unit of 500us; 0 to use default */
+ u8 ap_psm_change_period;
+
+ /* in unit of 500us; 0 to disable auto-pspoll */
+ u8 min_auto_pspoll_period;
+};
+
+int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg);
+
+/* 3.27 */
+struct wsm_set_pm_complete {
+ u8 psm; /* WSM_PSM_... */
+};
+
+/* 3.28 */
+#define WSM_SET_BSS_PARAMS_REQ_ID 0x0011
+#define WSM_SET_BSS_PARAMS_RESP_ID 0x0411
+struct wsm_set_bss_params {
+ /* This resets the beacon loss counters only */
+ u8 reset_beacon_loss;
+
+ /* The number of lost consecutive beacons after which */
+ /* the WLAN device should indicate the BSS-Lost event */
+ /* to the WLAN host driver. */
+ u8 beacon_lost_count;
+
+ /* The AID received during the association process. */
+ u16 aid;
+
+ /* The operational rate set mask */
+ u32 operational_rate_set;
+};
+
+int wsm_set_bss_params(struct cw1200_common *priv,
+ const struct wsm_set_bss_params *arg);
+
+/* 3.30 */
+#define WSM_ADD_KEY_REQ_ID 0x000C
+#define WSM_ADD_KEY_RESP_ID 0x040C
+struct wsm_add_key {
+ u8 type; /* WSM_KEY_TYPE_... */
+ u8 index; /* Key entry index: 0 -- WSM_KEY_MAX_INDEX */
+ u16 reserved;
+ union {
+ struct {
+ u8 peer[6]; /* MAC address of the peer station */
+ u8 reserved;
+ u8 keylen; /* Key length in bytes */
+ u8 keydata[16]; /* Key data */
+ } __packed wep_pairwise;
+ struct {
+ u8 keyid; /* Unique per key identifier (0..3) */
+ u8 keylen; /* Key length in bytes */
+ u16 reserved;
+ u8 keydata[16]; /* Key data */
+ } __packed wep_group;
+ struct {
+ u8 peer[6]; /* MAC address of the peer station */
+ u16 reserved;
+ u8 keydata[16]; /* TKIP key data */
+ u8 rx_mic_key[8]; /* Rx MIC key */
+ u8 tx_mic_key[8]; /* Tx MIC key */
+ } __packed tkip_pairwise;
+ struct {
+ u8 keydata[16]; /* TKIP key data */
+ u8 rx_mic_key[8]; /* Rx MIC key */
+ u8 keyid; /* Key ID */
+ u8 reserved[3];
+ u8 rx_seqnum[8]; /* Receive Sequence Counter */
+ } __packed tkip_group;
+ struct {
+ u8 peer[6]; /* MAC address of the peer station */
+ u16 reserved;
+ u8 keydata[16]; /* AES key data */
+ } __packed aes_pairwise;
+ struct {
+ u8 keydata[16]; /* AES key data */
+ u8 keyid; /* Key ID */
+ u8 reserved[3];
+ u8 rx_seqnum[8]; /* Receive Sequence Counter */
+ } __packed aes_group;
+ struct {
+ u8 peer[6]; /* MAC address of the peer station */
+ u8 keyid; /* Key ID */
+ u8 reserved;
+ u8 keydata[16]; /* WAPI key data */
+ u8 mic_key[16]; /* MIC key data */
+ } __packed wapi_pairwise;
+ struct {
+ u8 keydata[16]; /* WAPI key data */
+ u8 mic_key[16]; /* MIC key data */
+ u8 keyid; /* Key ID */
+ u8 reserved[3];
+ } __packed wapi_group;
+ } __packed;
+} __packed;
+
+int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg);
+
+/* 3.32 */
+#define WSM_REMOVE_KEY_REQ_ID 0x000D
+#define WSM_REMOVE_KEY_RESP_ID 0x040D
+struct wsm_remove_key {
+ u8 index; /* Key entry index : 0-10 */
+};
+
+int wsm_remove_key(struct cw1200_common *priv,
+ const struct wsm_remove_key *arg);
+
+/* 3.34 */
+struct wsm_set_tx_queue_params {
+ /* WSM_ACK_POLICY_... */
+ u8 ackPolicy;
+
+ /* Medium Time of TSPEC (in 32us units) allowed per */
+ /* One Second Averaging Period for this queue. */
+ u16 allowedMediumTime;
+
+ /* dot11MaxTransmitMsduLifetime to be used for the */
+ /* specified queue. */
+ u32 maxTransmitLifetime;
+};
+
+struct wsm_tx_queue_params {
+ /* NOTE: index is a linux queue id. */
+ struct wsm_set_tx_queue_params params[4];
+};
+
+
+#define WSM_TX_QUEUE_SET(queue_params, queue, ack_policy, allowed_time,\
+ max_life_time) \
+do { \
+ struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \
+ p->ackPolicy = (ack_policy); \
+ p->allowedMediumTime = (allowed_time); \
+ p->maxTransmitLifetime = (max_life_time); \
+} while (0)
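+
+/* Hypothetical usage sketch (not taken from the driver sources): set up
+ * the best-effort queue with a normal ACK policy, no medium-time limit
+ * and the default MSDU lifetime, then push it to the device:
+ *
+ *   struct wsm_tx_queue_params tx_queue_params;
+ *   WSM_TX_QUEUE_SET(&tx_queue_params, WSM_QUEUE_BEST_EFFORT,
+ *                    WSM_ACK_POLICY_NORMAL, 0, 0);
+ *   wsm_set_tx_queue_params(priv,
+ *                           &tx_queue_params.params[WSM_QUEUE_BEST_EFFORT], 0);
+ */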
+
+int wsm_set_tx_queue_params(struct cw1200_common *priv,
+ const struct wsm_set_tx_queue_params *arg, u8 id);
+
+/* 3.36 */
+#define WSM_EDCA_PARAMS_REQ_ID 0x0013
+#define WSM_EDCA_PARAMS_RESP_ID 0x0413
+struct wsm_edca_queue_params {
+ /* CWmin (in slots) for the access class. */
+ u16 cwmin;
+
+ /* CWmax (in slots) for the access class. */
+ u16 cwmax;
+
+ /* AIFS (in slots) for the access class. */
+ u16 aifns;
+
+ /* TX OP Limit (in microseconds) for the access class. */
+ u16 txop_limit;
+
+ /* dot11MaxReceiveLifetime to be used for the specified */
+ /* access class. Overrides the global */
+ /* dot11MaxReceiveLifetime value. */
+ u32 max_rx_lifetime;
+};
+
+struct wsm_edca_params {
+ /* NOTE: index is a linux queue id. */
+ struct wsm_edca_queue_params params[4];
+ bool uapsd_enable[4];
+};
+
+#define TXOP_UNIT 32
+#define WSM_EDCA_SET(__edca, __queue, __aifs, __cw_min, __cw_max, __txop, __lifetime,\
+ __uapsd) \
+ do { \
+ struct wsm_edca_queue_params *p = &(__edca)->params[__queue]; \
+ p->cwmin = __cw_min; \
+ p->cwmax = __cw_max; \
+ p->aifns = __aifs; \
+ p->txop_limit = ((__txop) * TXOP_UNIT); \
+ p->max_rx_lifetime = __lifetime; \
+ (__edca)->uapsd_enable[__queue] = (__uapsd); \
+ } while (0)
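+
+/* Hypothetical usage sketch (parameter values are made up for
+ * illustration): program AC_BE with AIFS 3, CW 15..1023, no TXOP
+ * limit, an example max-rx-lifetime of 0xc8 and U-APSD disabled:
+ *
+ *   WSM_EDCA_SET(&priv->edca, WSM_QUEUE_BEST_EFFORT,
+ *                3, 15, 1023, 0, 0xc8, false);
+ *   wsm_set_edca_params(priv, &priv->edca);
+ */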
+
+int wsm_set_edca_params(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg);
+
+int wsm_set_uapsd_param(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg);
+
+/* 3.38 */
+/* Set-System info. Skipped for now. Irrelevant. */
+
+/* 3.40 */
+#define WSM_SWITCH_CHANNEL_REQ_ID 0x0016
+#define WSM_SWITCH_CHANNEL_RESP_ID 0x0416
+
+struct wsm_switch_channel {
+ /* 1 - means the STA shall not transmit any further */
+ /* frames until the channel switch has completed */
+ u8 mode;
+
+ /* Number of TBTTs until channel switch occurs. */
+ /* 0 - indicates switch shall occur at any time */
+ /* 1 - occurs immediately before the next TBTT */
+ u8 switch_count;
+
+ /* The new channel number to switch to. */
+ /* Note this is defined as per section 2.7. */
+ u16 channel_number;
+};
+
+int wsm_switch_channel(struct cw1200_common *priv,
+ const struct wsm_switch_channel *arg);
+
+typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv);
+
+#define WSM_START_REQ_ID 0x0017
+#define WSM_START_RESP_ID 0x0417
+
+struct wsm_start {
+ /* WSM_START_MODE_... */
+ /* [in] */ u8 mode;
+
+ /* WSM_PHY_BAND_... */
+ /* [in] */ u8 band;
+
+ /* Channel number */
+ /* [in] */ u16 channel_number;
+
+ /* Client Traffic window in units of TU */
+ /* Valid only when mode == ..._P2P */
+ /* [in] */ u32 ct_window;
+
+ /* Interval between two consecutive */
+ /* beacon transmissions in TU. */
+ /* [in] */ u32 beacon_interval;
+
+ /* DTIM period in terms of beacon intervals */
+ /* [in] */ u8 dtim_period;
+
+ /* WSM_JOIN_PREAMBLE_... */
+ /* [in] */ u8 preamble;
+
+ /* The delay time (in microseconds) */
+ /* before sending a probe request. */
+ /* [in] */ u8 probe_delay;
+
+ /* Length of the SSID */
+ /* [in] */ u8 ssid_len;
+
+ /* SSID of the BSS or P2P_GO to be started now. */
+ /* [in] */ u8 ssid[32];
+
+ /* The basic supported rates for the MiniAP. */
+ /* [in] */ u32 basic_rate_set;
+};
+
+int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg);
+
+#define WSM_BEACON_TRANSMIT_REQ_ID 0x0018
+#define WSM_BEACON_TRANSMIT_RESP_ID 0x0418
+
+struct wsm_beacon_transmit {
+ /* 1: enable; 0: disable */
+ /* [in] */ u8 enable_beaconing;
+};
+
+int wsm_beacon_transmit(struct cw1200_common *priv,
+ const struct wsm_beacon_transmit *arg);
+
+int wsm_start_find(struct cw1200_common *priv);
+
+int wsm_stop_find(struct cw1200_common *priv);
+
+typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status);
+
+struct wsm_suspend_resume {
+ /* See 3.52 */
+ /* Link ID */
+ /* [out] */ int link_id;
+ /* Stop sending further Tx requests down to device for this link */
+ /* [out] */ bool stop;
+ /* Transmit multicast Frames */
+ /* [out] */ bool multicast;
+ /* The AC on which Tx is to be suspended/resumed. */
+ /* This is applicable only for U-APSD */
+ /* WSM_QUEUE_... */
+ /* [out] */ int queue;
+};
+
+typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv,
+ struct wsm_suspend_resume *arg);
+
+/* 3.54 Update-IE request. */
+struct wsm_update_ie {
+ /* WSM_UPDATE_IE_... */
+ /* [in] */ u16 what;
+ /* [in] */ u16 count;
+ /* [in] */ u8 *ies;
+ /* [in] */ size_t length;
+};
+
+int wsm_update_ie(struct cw1200_common *priv,
+ const struct wsm_update_ie *arg);
+
+/* 3.56 */
+struct wsm_map_link {
+ /* MAC address of the remote device */
+ /* [in] */ u8 mac_addr[6];
+ /* [in] */ u8 link_id;
+};
+
+int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg);
+
+/* ******************************************************************** */
+/* MIB shortcuts */
+
+static inline int wsm_set_output_power(struct cw1200_common *priv,
+ int power_level)
+{
+ __le32 val = __cpu_to_le32(power_level);
+ return wsm_write_mib(priv, WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL,
+ &val, sizeof(val));
+}
+
+static inline int wsm_set_beacon_wakeup_period(struct cw1200_common *priv,
+ unsigned dtim_interval,
+ unsigned listen_interval)
+{
+ struct {
+ u8 numBeaconPeriods;
+ u8 reserved;
+ __le16 listenInterval;
+ } val = {
+ dtim_interval, 0, __cpu_to_le16(listen_interval)
+ };
+
+ if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
+ return -EINVAL;
+ else
+ return wsm_write_mib(priv, WSM_MIB_ID_BEACON_WAKEUP_PERIOD,
+ &val, sizeof(val));
+}
+
+struct wsm_rcpi_rssi_threshold {
+ u8 rssiRcpiMode; /* WSM_RCPI_RSSI_... */
+ u8 lowerThreshold;
+ u8 upperThreshold;
+ u8 rollingAverageCount;
+};
+
+static inline int wsm_set_rcpi_rssi_threshold(struct cw1200_common *priv,
+ struct wsm_rcpi_rssi_threshold *arg)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_RCPI_RSSI_THRESHOLD, arg,
+ sizeof(*arg));
+}
+
+struct wsm_mib_counters_table {
+ __le32 plcp_errors;
+ __le32 fcs_errors;
+ __le32 tx_packets;
+ __le32 rx_packets;
+ __le32 rx_packet_errors;
+ __le32 rx_decryption_failures;
+ __le32 rx_mic_failures;
+ __le32 rx_no_key_failures;
+ __le32 tx_multicast_frames;
+ __le32 tx_frames_success;
+ __le32 tx_frame_failures;
+ __le32 tx_frames_retried;
+ __le32 tx_frames_multi_retried;
+ __le32 rx_frame_duplicates;
+ __le32 rts_success;
+ __le32 rts_failures;
+ __le32 ack_failures;
+ __le32 rx_multicast_frames;
+ __le32 rx_frames_success;
+ __le32 rx_cmac_icv_errors;
+ __le32 rx_cmac_replays;
+ __le32 rx_mgmt_ccmp_replays;
+} __packed;
+
+static inline int wsm_get_counters_table(struct cw1200_common *priv,
+ struct wsm_mib_counters_table *arg)
+{
+ return wsm_read_mib(priv, WSM_MIB_ID_COUNTERS_TABLE,
+ arg, sizeof(*arg));
+}
+
+static inline int wsm_get_station_id(struct cw1200_common *priv, u8 *mac)
+{
+ return wsm_read_mib(priv, WSM_MIB_ID_DOT11_STATION_ID, mac, ETH_ALEN);
+}
+
+struct wsm_rx_filter {
+ bool promiscuous;
+ bool bssid;
+ bool fcs;
+ bool probeResponder;
+};
+
+static inline int wsm_set_rx_filter(struct cw1200_common *priv,
+ const struct wsm_rx_filter *arg)
+{
+ __le32 val = 0;
+ if (arg->promiscuous)
+ val |= __cpu_to_le32(BIT(0));
+ if (arg->bssid)
+ val |= __cpu_to_le32(BIT(1));
+ if (arg->fcs)
+ val |= __cpu_to_le32(BIT(2));
+ if (arg->probeResponder)
+ val |= __cpu_to_le32(BIT(3));
+ return wsm_write_mib(priv, WSM_MIB_ID_RX_FILTER, &val, sizeof(val));
+}
+
+int wsm_set_probe_responder(struct cw1200_common *priv, bool enable);
+
+#define WSM_BEACON_FILTER_IE_HAS_CHANGED BIT(0)
+#define WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT BIT(1)
+#define WSM_BEACON_FILTER_IE_HAS_APPEARED BIT(2)
+
+struct wsm_beacon_filter_table_entry {
+ u8 ie_id;
+ u8 flags;
+ u8 oui[3];
+ u8 match_data[3];
+} __packed;
+
+struct wsm_mib_beacon_filter_table {
+ __le32 num;
+ struct wsm_beacon_filter_table_entry entry[10];
+} __packed;
+
+static inline int wsm_set_beacon_filter_table(struct cw1200_common *priv,
+ struct wsm_mib_beacon_filter_table *ft)
+{
+ size_t size = __le32_to_cpu(ft->num) *
+ sizeof(struct wsm_beacon_filter_table_entry) +
+ sizeof(__le32);
+
+ return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_TABLE, ft, size);
+}
+
+#define WSM_BEACON_FILTER_ENABLE BIT(0) /* Enable/disable beacon filtering */
+#define WSM_BEACON_FILTER_AUTO_ERP BIT(1) /* If 1 FW will handle ERP IE changes internally */
+
+struct wsm_beacon_filter_control {
+ int enabled;
+ int bcn_count;
+};
+
+static inline int wsm_beacon_filter_control(struct cw1200_common *priv,
+ struct wsm_beacon_filter_control *arg)
+{
+ struct {
+ __le32 enabled;
+ __le32 bcn_count;
+ } val;
+ val.enabled = __cpu_to_le32(arg->enabled);
+ val.bcn_count = __cpu_to_le32(arg->bcn_count);
+ return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_ENABLE, &val,
+ sizeof(val));
+}
+
+enum wsm_power_mode {
+ wsm_power_mode_active = 0,
+ wsm_power_mode_doze = 1,
+ wsm_power_mode_quiescent = 2,
+};
+
+struct wsm_operational_mode {
+ enum wsm_power_mode power_mode;
+ int disable_more_flag_usage;
+ int perform_ant_diversity;
+};
+
+static inline int wsm_set_operational_mode(struct cw1200_common *priv,
+ const struct wsm_operational_mode *arg)
+{
+ u8 val = arg->power_mode;
+ if (arg->disable_more_flag_usage)
+ val |= BIT(4);
+ if (arg->perform_ant_diversity)
+ val |= BIT(5);
+ return wsm_write_mib(priv, WSM_MIB_ID_OPERATIONAL_POWER_MODE, &val,
+ sizeof(val));
+}
+
+struct wsm_template_frame {
+ u8 frame_type;
+ u8 rate;
+ struct sk_buff *skb;
+};
+
+static inline int wsm_set_template_frame(struct cw1200_common *priv,
+ struct wsm_template_frame *arg)
+{
+ int ret;
+ u8 *p = skb_push(arg->skb, 4);
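+ /* Judging by the assignments below, the 4 bytes pushed in front of
+ * the frame form the TemplateFrame MIB header: byte 0 = frame type,
+ * byte 1 = rate, bytes 2-3 = little-endian length of the template
+ * (excluding this header).
+ */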
+ p[0] = arg->frame_type;
+ p[1] = arg->rate;
+ ((__le16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4);
+ ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len);
+ skb_pull(arg->skb, 4);
+ return ret;
+}
+
+
+struct wsm_protected_mgmt_policy {
+ bool protectedMgmtEnable;
+ bool unprotectedMgmtFramesAllowed;
+ bool encryptionForAuthFrame;
+};
+
+static inline int wsm_set_protected_mgmt_policy(struct cw1200_common *priv,
+ struct wsm_protected_mgmt_policy *arg)
+{
+ __le32 val = 0;
+ int ret;
+ if (arg->protectedMgmtEnable)
+ val |= __cpu_to_le32(BIT(0));
+ if (arg->unprotectedMgmtFramesAllowed)
+ val |= __cpu_to_le32(BIT(1));
+ if (arg->encryptionForAuthFrame)
+ val |= __cpu_to_le32(BIT(2));
+ ret = wsm_write_mib(priv, WSM_MIB_ID_PROTECTED_MGMT_POLICY,
+ &val, sizeof(val));
+ return ret;
+}
+
+struct wsm_mib_block_ack_policy {
+ u8 tx_tid;
+ u8 reserved1;
+ u8 rx_tid;
+ u8 reserved2;
+} __packed;
+
+static inline int wsm_set_block_ack_policy(struct cw1200_common *priv,
+ u8 tx_tid_policy,
+ u8 rx_tid_policy)
+{
+ struct wsm_mib_block_ack_policy val = {
+ .tx_tid = tx_tid_policy,
+ .rx_tid = rx_tid_policy,
+ };
+ return wsm_write_mib(priv, WSM_MIB_ID_BLOCK_ACK_POLICY, &val,
+ sizeof(val));
+}
+
+struct wsm_mib_association_mode {
+ u8 flags; /* WSM_ASSOCIATION_MODE_... */
+ u8 preamble; /* WSM_JOIN_PREAMBLE_... */
+ u8 greenfield; /* 1 for greenfield */
+ u8 mpdu_start_spacing;
+ __le32 basic_rate_set;
+} __packed;
+
+static inline int wsm_set_association_mode(struct cw1200_common *priv,
+ struct wsm_mib_association_mode *arg)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_ASSOCIATION_MODE, arg,
+ sizeof(*arg));
+}
+
+#define WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED BIT(2)
+#define WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT BIT(3)
+struct wsm_tx_rate_retry_policy {
+ u8 index;
+ u8 short_retries;
+ u8 long_retries;
+ /* BIT(2) - Terminate retries when Tx rate retry policy
+ * finishes.
+ * BIT(3) - Count initial frame transmission as part of
+ * rate retry counting but not as a retry
+ * attempt
+ */
+ u8 flags;
+ u8 rate_recoveries;
+ u8 reserved[3];
+ __le32 rate_count_indices[3];
+} __packed;
+
+struct wsm_set_tx_rate_retry_policy {
+ u8 num;
+ u8 reserved[3];
+ struct wsm_tx_rate_retry_policy tbl[8];
+} __packed;
+
+static inline int wsm_set_tx_rate_retry_policy(struct cw1200_common *priv,
+ struct wsm_set_tx_rate_retry_policy *arg)
+{
+ size_t size = 4 + arg->num * sizeof(struct wsm_tx_rate_retry_policy);
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg,
+ size);
+}
+
+/* 4.32 SetEtherTypeDataFrameFilter */
+struct wsm_ether_type_filter_hdr {
+ u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */
+ u8 reserved[3];
+} __packed;
+
+struct wsm_ether_type_filter {
+ u8 action; /* WSM_FILTER_ACTION_XXX */
+ u8 reserved;
+ __le16 type; /* Type of ethernet frame */
+} __packed;
+
+static inline int wsm_set_ether_type_filter(struct cw1200_common *priv,
+ struct wsm_ether_type_filter_hdr *arg)
+{
+ size_t size = sizeof(struct wsm_ether_type_filter_hdr) +
+ arg->num * sizeof(struct wsm_ether_type_filter);
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER,
+ arg, size);
+}
+
+/* 4.33 SetUDPPortDataFrameFilter */
+struct wsm_udp_port_filter_hdr {
+ u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */
+ u8 reserved[3];
+} __packed;
+
+struct wsm_udp_port_filter {
+ u8 action; /* WSM_FILTER_ACTION_XXX */
+ u8 type; /* WSM_FILTER_PORT_TYPE_XXX */
+ __le16 port; /* Port number */
+} __packed;
+
+static inline int wsm_set_udp_port_filter(struct cw1200_common *priv,
+ struct wsm_udp_port_filter_hdr *arg)
+{
+ size_t size = sizeof(struct wsm_udp_port_filter_hdr) +
+ arg->num * sizeof(struct wsm_udp_port_filter);
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER,
+ arg, size);
+}
+
+/* Undocumented MIBs: */
+/* 4.35 P2PDeviceInfo */
+#define D11_MAX_SSID_LEN (32)
+
+struct wsm_p2p_device_type {
+ __le16 category_id;
+ u8 oui[4];
+ __le16 subcategory_id;
+} __packed;
+
+struct wsm_p2p_device_info {
+ struct wsm_p2p_device_type primaryDevice;
+ u8 reserved1[3];
+ u8 devname_size;
+ u8 local_devname[D11_MAX_SSID_LEN];
+ u8 reserved2[3];
+ u8 num_secdev_supported;
+ struct wsm_p2p_device_type secdevs[0];
+} __packed;
+
+/* 4.36 SetWCDMABand - WO */
+struct wsm_cdma_band {
+ u8 wcdma_band;
+ u8 reserved[3];
+} __packed;
+
+/* 4.37 GroupTxSequenceCounter - RO */
+struct wsm_group_tx_seq {
+ __le32 bits_47_16;
+ __le16 bits_15_00;
+ __le16 reserved;
+} __packed;
+
+/* 4.39 SetHtProtection - WO */
+#define WSM_DUAL_CTS_PROT_ENB (1 << 0)
+#define WSM_NON_GREENFIELD_STA_PRESENT (1 << 1)
+#define WSM_HT_PROT_MODE__NO_PROT (0 << 2)
+#define WSM_HT_PROT_MODE__NON_MEMBER (1 << 2)
+#define WSM_HT_PROT_MODE__20_MHZ (2 << 2)
+#define WSM_HT_PROT_MODE__NON_HT_MIXED (3 << 2)
+#define WSM_LSIG_TXOP_PROT_FULL (1 << 4)
+#define WSM_LARGE_L_LENGTH_PROT (1 << 5)
+
+struct wsm_ht_protection {
+ __le32 flags;
+} __packed;
+
+/* 4.40 GPIO Command - R/W */
+#define WSM_GPIO_COMMAND_SETUP 0
+#define WSM_GPIO_COMMAND_READ 1
+#define WSM_GPIO_COMMAND_WRITE 2
+#define WSM_GPIO_COMMAND_RESET 3
+#define WSM_GPIO_ALL_PINS 0xFF
+
+struct wsm_gpio_command {
+ u8 command;
+ u8 pin;
+ __le16 config;
+} __packed;
+
+/* 4.41 TSFCounter - RO */
+struct wsm_tsf_counter {
+ __le64 tsf_counter;
+} __packed;
+
+/* 4.43 Keep alive period */
+struct wsm_keep_alive_period {
+ __le16 period;
+ u8 reserved[2];
+} __packed;
+
+static inline int wsm_keep_alive_period(struct cw1200_common *priv,
+ int period)
+{
+ struct wsm_keep_alive_period arg = {
+ .period = __cpu_to_le16(period),
+ };
+ return wsm_write_mib(priv, WSM_MIB_ID_KEEP_ALIVE_PERIOD,
+ &arg, sizeof(arg));
+};
+
+/* BSSID filtering */
+struct wsm_set_bssid_filtering {
+ u8 filter;
+ u8 reserved[3];
+} __packed;
+
+static inline int wsm_set_bssid_filtering(struct cw1200_common *priv,
+ bool enabled)
+{
+ struct wsm_set_bssid_filtering arg = {
+ .filter = !enabled,
+ };
+ return wsm_write_mib(priv, WSM_MIB_ID_DISABLE_BSSID_FILTER,
+ &arg, sizeof(arg));
+}
+
+/* Multicast filtering - 4.5 */
+struct wsm_mib_multicast_filter {
+ __le32 enable;
+ __le32 num_addrs;
+ u8 macaddrs[WSM_MAX_GRP_ADDRTABLE_ENTRIES][ETH_ALEN];
+} __packed;
+
+static inline int wsm_set_multicast_filter(struct cw1200_common *priv,
+ struct wsm_mib_multicast_filter *fp)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE,
+ fp, sizeof(*fp));
+}
+
+/* ARP IPv4 filtering - 4.10 */
+struct wsm_mib_arp_ipv4_filter {
+ __le32 enable;
+ __be32 ipv4addrs[WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES];
+} __packed;
+
+static inline int wsm_set_arp_ipv4_filter(struct cw1200_common *priv,
+ struct wsm_mib_arp_ipv4_filter *fp)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE,
+ fp, sizeof(*fp));
+}
+
+/* P2P Power Save Mode Info - 4.31 */
+struct wsm_p2p_ps_modeinfo {
+ u8 opp_ps_ct_window;
+ u8 count;
+ u8 reserved;
+ u8 dtim_count;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+static inline int wsm_set_p2p_ps_modeinfo(struct cw1200_common *priv,
+ struct wsm_p2p_ps_modeinfo *mi)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO,
+ mi, sizeof(*mi));
+}
+
+static inline int wsm_get_p2p_ps_modeinfo(struct cw1200_common *priv,
+ struct wsm_p2p_ps_modeinfo *mi)
+{
+ return wsm_read_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO,
+ mi, sizeof(*mi));
+}
+
+/* UseMultiTxConfMessage */
+
+static inline int wsm_use_multi_tx_conf(struct cw1200_common *priv,
+ bool enabled)
+{
+ __le32 arg = enabled ? __cpu_to_le32(1) : 0;
+
+ return wsm_write_mib(priv, WSM_MIB_USE_MULTI_TX_CONF,
+ &arg, sizeof(arg));
+}
+
+
+/* 4.26 SetUapsdInformation */
+struct wsm_uapsd_info {
+ __le16 uapsd_flags;
+ __le16 min_auto_trigger_interval;
+ __le16 max_auto_trigger_interval;
+ __le16 auto_trigger_step;
+};
+
+static inline int wsm_set_uapsd_info(struct cw1200_common *priv,
+ struct wsm_uapsd_info *arg)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_UAPSD_INFORMATION,
+ arg, sizeof(*arg));
+}
+
+/* 4.22 OverrideInternalTxRate */
+struct wsm_override_internal_txrate {
+ u8 internalTxRate;
+ u8 nonErpInternalTxRate;
+ u8 reserved[2];
+} __packed;
+
+static inline int wsm_set_override_internal_txrate(struct cw1200_common *priv,
+ struct wsm_override_internal_txrate *arg)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
+ arg, sizeof(*arg));
+}
+
+/* ******************************************************************** */
+/* WSM TX port control */
+
+void wsm_lock_tx(struct cw1200_common *priv);
+void wsm_lock_tx_async(struct cw1200_common *priv);
+bool wsm_flush_tx(struct cw1200_common *priv);
+void wsm_unlock_tx(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* WSM / BH API */
+
+int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len);
+int wsm_handle_rx(struct cw1200_common *priv, u16 id, struct wsm_hdr *wsm,
+ struct sk_buff **skb_p);
+
+/* ******************************************************************** */
+/* wsm_buf API */
+
+struct wsm_buf {
+ u8 *begin;
+ u8 *data;
+ u8 *end;
+};
+
+void wsm_buf_init(struct wsm_buf *buf);
+void wsm_buf_deinit(struct wsm_buf *buf);
+
+/* ******************************************************************** */
+/* wsm_cmd API */
+
+struct wsm_cmd {
+ spinlock_t lock; /* Protect structure from concurrent access */
+ int done;
+ u8 *ptr;
+ size_t len;
+ void *arg;
+ int ret;
+ u16 cmd;
+};
+
+/* ******************************************************************** */
+/* WSM TX buffer access */
+
+int wsm_get_tx(struct cw1200_common *priv, u8 **data,
+ size_t *tx_len, int *burst);
+void wsm_txed(struct cw1200_common *priv, u8 *data);
+
+/* ******************************************************************** */
+/* Queue mapping: WSM <---> linux */
+/* Linux: VO VI BE BK */
+/* WSM: BE BK VI VO */
+
+static inline u8 wsm_queue_id_to_linux(u8 queue_id)
+{
+ static const u8 queue_mapping[] = {
+ 2, 3, 1, 0
+ };
+ return queue_mapping[queue_id];
+}
+
+static inline u8 wsm_queue_id_to_wsm(u8 queue_id)
+{
+ static const u8 queue_mapping[] = {
+ 3, 2, 0, 1
+ };
+ return queue_mapping[queue_id];
+}
+
+#endif /* CW1200_HWIO_H_INCLUDED */
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 15920aaa5dd6..f8ab193009cd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6242,8 +6242,6 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
- pci_set_power_state(pci_dev, PCI_D0);
-
if (!ipw2100_hw_is_adapter_in_system(dev)) {
printk(KERN_WARNING DRV_NAME
"Device not found via register read.\n");
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 4ed5e45ca1e2..6b823a1ab789 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -3548,6 +3548,7 @@ static int ipw_load(struct ipw_priv *priv)
ipw_rx_queue_reset(priv, priv->rxq);
if (!priv->rxq) {
IPW_ERROR("Unable to initialize Rx queue\n");
+ rc = -ENOMEM;
goto error;
}
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 95a1ca1e895c..9ffe65931b29 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1195,7 +1195,7 @@ static int libipw_parse_info_param(struct libipw_info_element
#ifdef CONFIG_LIBIPW_DEBUG
p += snprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
- network->rates[i]);
+ network->rates_ex[i]);
#endif
if (libipw_is_ofdm_rate
(info_element->data[i])) {
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index b37a582ccbe7..9581d07a4242 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3119,7 +3119,7 @@ il3945_store_debug_level(struct device *d, struct device_attribute *attr,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 0, &val);
+ ret = kstrtoul(buf, 0, &val);
if (ret)
IL_INFO("%s is not in hex or decimal form.\n", buf);
else
@@ -3727,7 +3727,8 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* 5. Setup HW Constants
* ********************/
/* Device-specific setup */
- if (il3945_hw_set_hw_params(il)) {
+ err = il3945_hw_set_hw_params(il);
+ if (err) {
IL_ERR("failed to set hw settings\n");
goto out_eeprom_free;
}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index dc1e6da9976a..c092033945cc 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -331,6 +331,19 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
return;
}
+ /*
+ * The firmware will not transmit a frame on a passive channel if it has
+ * not yet received a valid frame on that channel. When this error
+ * happens we have to wait until the firmware unblocks itself, i.e. when
+ * we receive a beacon or another frame. We unblock the queues in
+ * il3945_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
+ */
+ if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
+ il->iw_mode == NL80211_IFTYPE_STATION) {
+ il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+ D_INFO("Stopped queues - RX waiting on passive channel\n");
+ }
+
txq->time_stamp = jiffies;
info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
ieee80211_tx_info_clear_status(info);
@@ -488,6 +501,11 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
return;
}
+ if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
+ il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+ D_INFO("Woke queues - frame received on passive channel\n");
+ }
+
skb = dev_alloc_skb(128);
if (!skb) {
IL_ERR("dev_alloc_skb failed\n");
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 9a95045c97b6..b9b2bb51e605 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -588,6 +588,11 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
return;
}
+ if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
+ il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+ D_INFO("Woke queues - frame received on passive channel\n");
+ }
+
/* In case of HW accelerated crypto and bad decryption, drop */
if (!il->cfg->mod_params->sw_crypto &&
il_set_decrypted_flag(il, hdr, ampdu_status, stats))
@@ -2806,6 +2811,19 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
return;
}
+ /*
+ * The firmware will not transmit a frame on a passive channel if it has
+ * not yet received a valid frame on that channel. When this error
+ * happens we have to wait until the firmware unblocks itself, i.e. when
+ * we receive a beacon or another frame. We unblock the queues in
+ * il4965_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
+ */
+ if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
+ il->iw_mode == NL80211_IFTYPE_STATION) {
+ il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+ D_INFO("Stopped queues - RX waiting on passive channel\n");
+ }
+
spin_lock_irqsave(&il->sta_lock, flags);
if (txq->sched_retry) {
const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
@@ -4567,7 +4585,7 @@ il4965_store_debug_level(struct device *d, struct device_attribute *attr,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 0, &val);
+ ret = kstrtoul(buf, 0, &val);
if (ret)
IL_ERR("%s is not in hex or decimal form.\n", buf);
else
@@ -4614,7 +4632,7 @@ il4965_store_tx_power(struct device *d, struct device_attribute *attr,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
IL_INFO("%s is not in decimal form.\n", buf);
else {
@@ -5741,7 +5759,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->flags =
IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
if (il->cfg->sku & IL_SKU_N)
hw->flags |=
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 3b6c99400892..048421511988 100644
--- a/drivers/net/wireless/iwlegacy/commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -1348,14 +1348,6 @@ struct il_rx_mpdu_res_start {
#define TX_CMD_SEC_KEY128 0x08
/*
- * security overhead sizes
- */
-#define WEP_IV_LEN 4
-#define WEP_ICV_LEN 4
-#define CCMP_MIC_LEN 8
-#define TKIP_ICV_LEN 4
-
-/*
* C_TX = 0x1c (command)
*/
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e9a3cbc409ae..3195aad440dd 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -5307,6 +5307,17 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
D_MAC80211("BSSID %pM\n", bss_conf->bssid);
/*
+ * On a passive channel we wait with blocked queues to see if
+ * there is traffic on that channel. If no frame is received
+ * (which is very unlikely since the scan detected an AP on that
+ * channel, but theoretically possible), the mac80211 association
+ * procedure will time out and call us with a NULL bssid. We have
+ * to unblock the queues in that case.
+ */
+ if (is_zero_ether_addr(bss_conf->bssid))
+ il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+
+ /*
* If there is currently a HW scan going on in the background,
* then we need to cancel it, otherwise sometimes we are not
* able to authenticate (FIXME: why ?)
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 4caaf52986a4..83f8ed8a5528 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1299,6 +1299,8 @@ struct il_priv {
/* queue refcounts */
#define IL_MAX_HW_QUEUES 32
unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
+#define IL_STOP_REASON_PASSIVE 0
+ unsigned long stop_reason;
/* for each AC */
atomic_t queue_stop_count[4];
@@ -2257,6 +2259,19 @@ il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
}
static inline void
+_il_wake_queue(struct il_priv *il, u8 ac)
+{
+ if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
+ ieee80211_wake_queue(il->hw, ac);
+}
+
+static inline void
+_il_stop_queue(struct il_priv *il, u8 ac)
+{
+ if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
+ ieee80211_stop_queue(il->hw, ac);
+}
+static inline void
il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
{
u8 queue = txq->swq_id;
@@ -2264,8 +2279,7 @@ il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
u8 hwq = (queue >> 2) & 0x1f;
if (test_and_clear_bit(hwq, il->queue_stopped))
- if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
- ieee80211_wake_queue(il->hw, ac);
+ _il_wake_queue(il, ac);
}
static inline void
@@ -2276,8 +2290,27 @@ il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
u8 hwq = (queue >> 2) & 0x1f;
if (!test_and_set_bit(hwq, il->queue_stopped))
- if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
- ieee80211_stop_queue(il->hw, ac);
+ _il_stop_queue(il, ac);
+}
+
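+/*
+ * Stop/wake all ACs for a given reason bit. Each reason holds one stop
+ * reference per AC, so a queue wakes only when its last reference drops.
+ */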
+static inline void
+il_wake_queues_by_reason(struct il_priv *il, int reason)
+{
+ u8 ac;
+
+ if (test_and_clear_bit(reason, &il->stop_reason))
+ for (ac = 0; ac < 4; ac++)
+ _il_wake_queue(il, ac);
+}
+
+static inline void
+il_stop_queues_by_reason(struct il_priv *il, int reason)
+{
+ u8 ac;
+
+ if (!test_and_set_bit(reason, &il->stop_reason))
+ for (ac = 0; ac < 4; ac++)
+ _il_stop_queue(il, ac);
}
#ifdef ieee80211_stop_queue
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 56c2040a955b..cbaa5c2c410f 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -128,16 +128,6 @@ config IWLWIFI_DEVICE_TRACING
occur.
endmenu
-config IWLWIFI_DEVICE_TESTMODE
- def_bool y
- depends on IWLWIFI
- depends on NL80211_TESTMODE
- help
- This option enables the testmode support for iwlwifi device through
- NL80211_TESTMODE. This provide the capabilities of enable user space
- validation applications to interacts with the device through the
- generic netlink message via NL80211_TESTMODE channel.
-
config IWLWIFI_P2P
def_bool y
bool "iwlwifi experimental P2P support"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 3b5613ea458b..1fa64429bcc2 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -7,14 +7,15 @@ iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwl-7000.o
+iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
+iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
+
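+# CONFIG_IWLDVM/IWLMVM may be =m, in which case kbuild collects the objects
+# in iwlwifi-m; fold them back so they are still linked into iwlwifi.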
+iwlwifi-objs += $(iwlwifi-m)
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
-
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
index 5ff76b204141..dce7ab2e0c4b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -8,6 +8,5 @@ iwldvm-objs += scan.o led.o
iwldvm-objs += rxon.o devices.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
-iwldvm-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += testmode.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 48545ab00311..18355110deff 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -76,13 +76,16 @@
#define IWL_INVALID_STATION 255
/* device operations */
-extern struct iwl_lib_ops iwl1000_lib;
-extern struct iwl_lib_ops iwl2000_lib;
-extern struct iwl_lib_ops iwl2030_lib;
-extern struct iwl_lib_ops iwl5000_lib;
-extern struct iwl_lib_ops iwl5150_lib;
-extern struct iwl_lib_ops iwl6000_lib;
-extern struct iwl_lib_ops iwl6030_lib;
+extern const struct iwl_dvm_cfg iwl_dvm_1000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_2000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_105_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_2030_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_5000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_5150_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6005_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6050_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
#define TIME_UNIT 1024
@@ -291,8 +294,8 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
{
- return priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist;
+ return priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -402,43 +405,6 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
extern int iwl_alive_start(struct iwl_priv *priv);
-/* testmode support */
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
-
-extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
- int len);
-extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct netlink_callback *cb,
- void *data, int len);
-extern void iwl_testmode_init(struct iwl_priv *priv);
-extern void iwl_testmode_free(struct iwl_priv *priv);
-
-#else
-
-static inline
-int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
-{
- return -ENOSYS;
-}
-
-static inline
-int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct netlink_callback *cb,
- void *data, int len)
-{
- return -ENOSYS;
-}
-
-static inline void iwl_testmode_init(struct iwl_priv *priv)
-{
-}
-
-static inline void iwl_testmode_free(struct iwl_priv *priv)
-{
-}
-#endif
-
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
enum iwl_rxon_context_id ctxid);
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index d6c4cf2ad7c5..1b0f0d502568 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -521,7 +521,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
- if (priv->cfg->base_params->hd_v2) {
+ if (priv->lib->hd_v2) {
cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -895,7 +895,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
continue;
}
- delta_g = (priv->cfg->base_params->chain_noise_scale *
+ delta_g = (priv->lib->chain_noise_scale *
((s32)average_noise[default_chain] -
(s32)average_noise[i])) / 1500;
@@ -1051,8 +1051,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
return;
/* Analyze signal for disconnected antenna */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
data->active_chains = priv->nvm_data->valid_rx_ant;
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 95ca026ecc9d..ebdac909f0cd 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -838,10 +838,6 @@ struct iwl_qosparam_cmd {
#define STA_MODIFY_DELBA_TID_MSK 0x10
#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
-/* Receiver address (actually, Rx station's index into station table),
- * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
-#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
-
/* agn */
struct iwl_keyinfo {
__le16 key_flags;
@@ -1225,14 +1221,6 @@ struct iwl_rx_mpdu_res_start {
#define TX_CMD_SEC_KEY128 0x08
/*
- * security overhead sizes
- */
-#define WEP_IV_LEN 4
-#define WEP_ICV_LEN 4
-#define CCMP_MIC_LEN 8
-#define TKIP_ICV_LEN 4
-
-/*
* REPLY_TX = 0x1c (command)
*/
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 71ea77576d22..60a4e0d15715 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -52,8 +52,6 @@
#include "rs.h"
#include "tt.h"
-#include "iwl-test.h"
-
/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -568,16 +566,61 @@ struct iwl_hw_params {
const struct iwl_sensitivity_ranges *sens;
};
-struct iwl_lib_ops {
- /* set hw dependent parameters */
+/**
+ * struct iwl_dvm_bt_params - DVM specific BT (coex) parameters
+ * @advanced_bt_coexist: support advanced bt coexist
+ * @bt_init_traffic_load: specify initial bt traffic load
+ * @bt_prio_boost: default bt priority boost value
+ * @agg_time_limit: maximum number of uSec in aggregation
+ * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
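+ * @bt_session_2: use version 2 of the BT config command (2000 series and up)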
+ */
+struct iwl_dvm_bt_params {
+ bool advanced_bt_coexist;
+ u8 bt_init_traffic_load;
+ u32 bt_prio_boost;
+ u16 agg_time_limit;
+ bool bt_sco_disable;
+ bool bt_session_2;
+};
+
+/**
+ * struct iwl_dvm_cfg - DVM firmware specific device configuration
+ * @set_hw_params: set hardware parameters
+ * @set_channel_switch: send channel switch command
+ * @nic_config: apply device specific configuration
+ * @temperature: read temperature
+ * @adv_thermal_throttle: support advanced thermal throttling
+ * @support_ct_kill_exit: support ct kill exit condition
+ * @plcp_delta_threshold: PLCP error rate threshold used to trigger
+ * radio tuning when the received PLCP error rate is high
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @no_idle_support: do not support idle mode
+ * @bt_params: pointer to BT parameters
+ * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ * don't send it to those
+ * @temp_offset_v2: support v2 of temperature offset calibration
+ * @adv_pm: advanced power management
+ */
+struct iwl_dvm_cfg {
void (*set_hw_params)(struct iwl_priv *priv);
int (*set_channel_switch)(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch);
- /* device specific configuration */
void (*nic_config)(struct iwl_priv *priv);
-
- /* temperature */
void (*temperature)(struct iwl_priv *priv);
+
+ const struct iwl_dvm_bt_params *bt_params;
+ s32 chain_noise_scale;
+ u8 plcp_delta_threshold;
+ bool adv_thermal_throttle;
+ bool support_ct_kill_exit;
+ bool hd_v2;
+ bool no_idle_support;
+ bool need_temp_offset_calib;
+ bool no_xtal_calib;
+ bool temp_offset_v2;
+ bool adv_pm;
};
struct iwl_wipan_noa_data {
@@ -610,7 +653,7 @@ struct iwl_priv {
struct device *dev; /* for debug prints only */
const struct iwl_cfg *cfg;
const struct iwl_fw *fw;
- const struct iwl_lib_ops *lib;
+ const struct iwl_dvm_cfg *lib;
unsigned long status;
spinlock_t sta_lock;
@@ -646,10 +689,6 @@ struct iwl_priv {
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
-#define IWL_OWNERSHIP_DRIVER 0
-#define IWL_OWNERSHIP_TM 1
- u8 ucode_owner;
-
/* ucode beacon time */
u32 ucode_beacon_time;
int missed_beacon_threshold;
@@ -844,7 +883,7 @@ struct iwl_priv {
#endif /* CONFIG_IWLWIFI_DEBUGFS */
struct iwl_nvm_data *nvm_data;
- /* eeprom blob for debugfs/testmode */
+ /* eeprom blob for debugfs */
u8 *eeprom_blob;
size_t eeprom_blob_size;
@@ -860,16 +899,14 @@ struct iwl_priv {
unsigned long blink_on, blink_off;
bool led_registered;
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
- struct iwl_test tst;
- u32 tm_fixed_rate;
-#endif
-
/* WoWLAN GTK rekey data */
u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
__le64 replay_ctr;
__le16 last_seq_ctl;
bool have_rekey_data;
+#ifdef CONFIG_PM_SLEEP
+ struct wiphy_wowlan_support wowlan_support;
+#endif
/* device_pointers: pointers to ucode event tables */
struct {
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index c48907c8ab43..352c6cb7b4f1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -174,10 +174,13 @@ static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.sens = &iwl1000_sensitivity;
}
-struct iwl_lib_ops iwl1000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_1000_cfg = {
.set_hw_params = iwl1000_hw_set_hw_params,
.nic_config = iwl1000_nic_config,
.temperature = iwlagn_temperature,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
@@ -232,16 +235,56 @@ static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.sens = &iwl2000_sensitivity;
}
-struct iwl_lib_ops iwl2000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_2000_cfg = {
.set_hw_params = iwl2000_hw_set_hw_params,
.nic_config = iwl2000_nic_config,
.temperature = iwlagn_temperature,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .hd_v2 = true,
+ .need_temp_offset_calib = true,
+ .temp_offset_v2 = true,
};
-struct iwl_lib_ops iwl2030_lib = {
+const struct iwl_dvm_cfg iwl_dvm_105_cfg = {
.set_hw_params = iwl2000_hw_set_hw_params,
.nic_config = iwl2000_nic_config,
.temperature = iwlagn_temperature,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .hd_v2 = true,
+ .need_temp_offset_calib = true,
+ .temp_offset_v2 = true,
+ .adv_pm = true,
+};
+
+static const struct iwl_dvm_bt_params iwl2030_bt_params = {
+ /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+ .advanced_bt_coexist = true,
+ .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+ .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+ .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
+ .bt_sco_disable = true,
+ .bt_session_2 = true,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_2030_cfg = {
+ .set_hw_params = iwl2000_hw_set_hw_params,
+ .nic_config = iwl2000_nic_config,
+ .temperature = iwlagn_temperature,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .hd_v2 = true,
+ .bt_params = &iwl2030_bt_params,
+ .need_temp_offset_calib = true,
+ .temp_offset_v2 = true,
+ .adv_pm = true,
};
/*
@@ -420,16 +463,23 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
return iwl_dvm_send_cmd(priv, &hcmd);
}
-struct iwl_lib_ops iwl5000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_5000_cfg = {
.set_hw_params = iwl5000_hw_set_hw_params,
.set_channel_switch = iwl5000_hw_channel_switch,
.temperature = iwlagn_temperature,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .no_idle_support = true,
};
-struct iwl_lib_ops iwl5150_lib = {
+const struct iwl_dvm_cfg iwl_dvm_5150_cfg = {
.set_hw_params = iwl5150_hw_set_hw_params,
.set_channel_switch = iwl5000_hw_channel_switch,
.temperature = iwl5150_temperature,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .no_idle_support = true,
+ .no_xtal_calib = true,
};
@@ -584,16 +634,59 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
return err;
}
-struct iwl_lib_ops iwl6000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_6000_cfg = {
.set_hw_params = iwl6000_hw_set_hw_params,
.set_channel_switch = iwl6000_hw_channel_switch,
.nic_config = iwl6000_nic_config,
.temperature = iwlagn_temperature,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_6005_cfg = {
+ .set_hw_params = iwl6000_hw_set_hw_params,
+ .set_channel_switch = iwl6000_hw_channel_switch,
+ .nic_config = iwl6000_nic_config,
+ .temperature = iwlagn_temperature,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .need_temp_offset_calib = true,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_6050_cfg = {
+ .set_hw_params = iwl6000_hw_set_hw_params,
+ .set_channel_switch = iwl6000_hw_channel_switch,
+ .nic_config = iwl6000_nic_config,
+ .temperature = iwlagn_temperature,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
+};
+
+static const struct iwl_dvm_bt_params iwl6000_bt_params = {
+ /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+ .advanced_bt_coexist = true,
+ .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+ .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+ .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+ .bt_sco_disable = true,
};
-struct iwl_lib_ops iwl6030_lib = {
+const struct iwl_dvm_cfg iwl_dvm_6030_cfg = {
.set_hw_params = iwl6000_hw_set_hw_params,
.set_channel_switch = iwl6000_hw_channel_switch,
.nic_config = iwl6000_nic_config,
.temperature = iwlagn_temperature,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .bt_params = &iwl6000_bt_params,
+ .need_temp_offset_calib = true,
+ .adv_pm = true,
};
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 54f553380aa8..3d5bdc4217a8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -254,23 +254,23 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
sizeof(basic.bt3_lookup_table));
- if (priv->cfg->bt_params) {
+ if (priv->lib->bt_params) {
/*
* newer generation of devices (2000 series and newer)
* use the version 2 of the bt command
* we need to make sure sending the host command
* with correct data structure to avoid uCode assert
*/
- if (priv->cfg->bt_params->bt_session_2) {
+ if (priv->lib->bt_params->bt_session_2) {
bt_cmd_v2.prio_boost = cpu_to_le32(
- priv->cfg->bt_params->bt_prio_boost);
+ priv->lib->bt_params->bt_prio_boost);
bt_cmd_v2.tx_prio_boost = 0;
bt_cmd_v2.rx_prio_boost = 0;
} else {
/* older version only has 8 bits */
- WARN_ON(priv->cfg->bt_params->bt_prio_boost & ~0xFF);
+ WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF);
bt_cmd_v1.prio_boost =
- priv->cfg->bt_params->bt_prio_boost;
+ priv->lib->bt_params->bt_prio_boost;
bt_cmd_v1.tx_prio_boost = 0;
bt_cmd_v1.rx_prio_boost = 0;
}
@@ -330,7 +330,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
priv->bt_full_concurrent ?
"full concurrency" : "3-wire");
- if (priv->cfg->bt_params->bt_session_2) {
+ if (priv->lib->bt_params->bt_session_2) {
memcpy(&bt_cmd_v2.basic, &basic,
sizeof(basic));
ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
@@ -758,8 +758,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
*/
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist &&
(priv->bt_full_concurrent ||
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
/*
@@ -830,8 +830,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
else
active_chains = priv->nvm_data->valid_rx_ant;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist &&
(priv->bt_full_concurrent ||
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
/*
@@ -1288,12 +1288,6 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lockdep_assert_held(&priv->mutex);
- if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
- !(cmd->flags & CMD_ON_DEMAND)) {
- IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
- return -EIO;
- }
-
return iwl_trans_send_cmd(priv->trans, cmd);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index cab23af0be9e..822f1a00efbb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -208,20 +208,21 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
priv->trans->ops->d3_suspend &&
priv->trans->ops->d3_resume &&
device_can_wakeup(priv->trans->dev)) {
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
- WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_RFKILL_RELEASE;
+ priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE;
if (!iwlwifi_mod_params.sw_crypto)
- hw->wiphy->wowlan.flags |=
+ priv->wowlan_support.flags |=
WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_GTK_REKEY_FAILURE;
- hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
- hw->wiphy->wowlan.pattern_min_len =
+ priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
+ priv->wowlan_support.pattern_min_len =
IWLAGN_WOWLAN_MIN_PATTERN_LEN;
- hw->wiphy->wowlan.pattern_max_len =
+ priv->wowlan_support.pattern_max_len =
IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+ hw->wiphy->wowlan = &priv->wowlan_support;
}
#endif
@@ -426,7 +427,11 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
if (ret)
goto error;
- iwl_trans_d3_suspend(priv->trans);
+ /* let the ucode operate on its own */
+ iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ iwl_trans_d3_suspend(priv->trans, false);
goto out;
@@ -500,7 +505,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
- ret = iwl_trans_d3_resume(priv->trans, &d3_status);
+ ret = iwl_trans_d3_resume(priv->trans, &d3_status, false);
if (ret)
goto out_unlock;
@@ -509,6 +514,10 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
goto out_unlock;
}
+ /* uCode is no longer operating by itself */
+ iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
base = priv->device_pointers.error_event_table;
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
IWL_WARN(priv, "Invalid error table during resume!\n");
@@ -1276,8 +1285,8 @@ static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->mutex);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist) {
if (rssi_event == RSSI_EVENT_LOW)
priv->bt_enable_pspoll = true;
else if (rssi_event == RSSI_EVENT_HIGH)
@@ -1387,7 +1396,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
return err;
}
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
+ if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist &&
vif->type == NL80211_IFTYPE_ADHOC) {
/*
* pretend to have high BT traffic as long as we
@@ -1757,8 +1766,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
.remain_on_channel = iwlagn_mac_remain_on_channel,
.cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
.rssi_callback = iwlagn_mac_rssi_callback,
- CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
- CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
.set_tim = iwlagn_mac_set_tim,
};
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 74d7572e7091..1531a4fc0960 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -615,7 +615,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
priv->thermal_throttle.ct_kill_toggle = false;
- if (priv->cfg->base_params->support_ct_kill_exit) {
+ if (priv->lib->support_ct_kill_exit) {
adv_cmd.critical_temperature_enter =
cpu_to_le32(priv->hw_params.ct_kill_threshold);
adv_cmd.critical_temperature_exit =
@@ -732,10 +732,10 @@ int iwl_alive_start(struct iwl_priv *priv)
}
/* download priority table before any calibration request */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist) {
/* Configure Bluetooth device coexistence support */
- if (priv->cfg->bt_params->bt_sco_disable)
+ if (priv->lib->bt_params->bt_sco_disable)
priv->bt_enable_pspoll = false;
else
priv->bt_enable_pspoll = true;
@@ -758,7 +758,7 @@ int iwl_alive_start(struct iwl_priv *priv)
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- } else {
+ } else if (priv->lib->bt_params) {
/*
* default is 2-wire BT coexexistence support
*/
@@ -873,9 +873,9 @@ void iwl_down(struct iwl_priv *priv)
priv->bt_status = 0;
priv->cur_rssi_ctx = NULL;
priv->bt_is_sco = 0;
- if (priv->cfg->bt_params)
+ if (priv->lib->bt_params)
priv->bt_traffic_load =
- priv->cfg->bt_params->bt_init_traffic_load;
+ priv->lib->bt_params->bt_init_traffic_load;
else
priv->bt_traffic_load = 0;
priv->bt_full_concurrent = false;
@@ -1058,7 +1058,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
iwl_setup_scan_deferred_work(priv);
- if (priv->cfg->bt_params)
+ if (priv->lib->bt_params)
iwlagn_bt_setup_deferred_work(priv);
init_timer(&priv->statistics_periodic);
@@ -1072,7 +1072,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
- if (priv->cfg->bt_params)
+ if (priv->lib->bt_params)
iwlagn_bt_cancel_deferred_work(priv);
cancel_work_sync(&priv->run_time_calib_work);
@@ -1098,16 +1098,13 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->band = IEEE80211_BAND_2GHZ;
- priv->plcp_delta_threshold =
- priv->cfg->base_params->plcp_delta_threshold;
+ priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
priv->agg_tids_count = 0;
- priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
-
priv->rx_statistics_jiffies = jiffies;
/* Choose which receivers/antennas to use */
@@ -1116,8 +1113,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
iwl_init_scan_params(priv);
/* init bt coex */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist) {
priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -1173,12 +1170,6 @@ static void iwl_option_config(struct iwl_priv *priv)
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
#endif
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
- IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE enabled\n");
-#else
- IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE disabled\n");
-#endif
-
#ifdef CONFIG_IWLWIFI_P2P
IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
#else
@@ -1264,31 +1255,37 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
switch (priv->cfg->device_family) {
case IWL_DEVICE_FAMILY_1000:
case IWL_DEVICE_FAMILY_100:
- priv->lib = &iwl1000_lib;
+ priv->lib = &iwl_dvm_1000_cfg;
break;
case IWL_DEVICE_FAMILY_2000:
+ priv->lib = &iwl_dvm_2000_cfg;
+ break;
case IWL_DEVICE_FAMILY_105:
- priv->lib = &iwl2000_lib;
+ priv->lib = &iwl_dvm_105_cfg;
break;
case IWL_DEVICE_FAMILY_2030:
case IWL_DEVICE_FAMILY_135:
- priv->lib = &iwl2030_lib;
+ priv->lib = &iwl_dvm_2030_cfg;
break;
case IWL_DEVICE_FAMILY_5000:
- priv->lib = &iwl5000_lib;
+ priv->lib = &iwl_dvm_5000_cfg;
break;
case IWL_DEVICE_FAMILY_5150:
- priv->lib = &iwl5150_lib;
+ priv->lib = &iwl_dvm_5150_cfg;
break;
case IWL_DEVICE_FAMILY_6000:
- case IWL_DEVICE_FAMILY_6005:
case IWL_DEVICE_FAMILY_6000i:
+ priv->lib = &iwl_dvm_6000_cfg;
+ break;
+ case IWL_DEVICE_FAMILY_6005:
+ priv->lib = &iwl_dvm_6005_cfg;
+ break;
case IWL_DEVICE_FAMILY_6050:
case IWL_DEVICE_FAMILY_6150:
- priv->lib = &iwl6000_lib;
+ priv->lib = &iwl_dvm_6050_cfg;
break;
case IWL_DEVICE_FAMILY_6030:
- priv->lib = &iwl6030_lib;
+ priv->lib = &iwl_dvm_6030_cfg;
break;
default:
break;
@@ -1350,8 +1347,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
true : false;
- /* enable/disable bt channel inhibition */
- priv->bt_ch_announce = iwlwifi_mod_params.bt_ch_announce;
+ /* bt channel inhibition enabled */
+ priv->bt_ch_announce = true;
IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
(priv->bt_ch_announce) ? "On" : "Off");
@@ -1446,7 +1443,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
********************/
iwl_setup_deferred_work(priv);
iwl_setup_rx_handlers(priv);
- iwl_testmode_init(priv);
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
@@ -1483,7 +1479,6 @@ out_mac80211_unregister:
iwlagn_mac_unregister(priv);
out_destroy_workqueue:
iwl_tt_exit(priv);
- iwl_testmode_free(priv);
iwl_cancel_deferred_work(priv);
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
@@ -1505,7 +1500,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
- iwl_testmode_free(priv);
iwlagn_mac_unregister(priv);
iwl_tt_exit(priv);
@@ -1854,14 +1848,9 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
return pos;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#else
- size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
- ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#endif
IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
size);
@@ -1905,10 +1894,8 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
unsigned int reload_msec;
unsigned long reload_jiffies;
-#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
-#endif
/* uCode is no longer loaded. */
priv->ucode_loaded = false;
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index bd69018d07a9..77cb59712235 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -163,7 +163,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
u8 skip;
u32 slp_itrvl;
- if (priv->cfg->adv_pm) {
+ if (priv->lib->adv_pm) {
table = apm_range_2;
if (period <= IWL_DTIM_RANGE_1_MAX)
table = apm_range_1;
@@ -217,7 +217,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
if (iwl_advanced_bt_coexist(priv)) {
- if (!priv->cfg->bt_params->bt_sco_disable)
+ if (!priv->lib->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -293,7 +293,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
if (priv->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
- else if (!priv->cfg->base_params->no_idle_support &&
+ else if (!priv->lib->no_idle_support &&
priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (iwl_tt_is_low_power_state(priv)) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 10fbb176cc8e..1b693944123b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -351,12 +351,6 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
- /* testmode has higher priority to overwirte the fixed rate */
- if (priv->tm_fixed_rate)
- lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
-#endif
-
IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
@@ -419,23 +413,18 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
load = rs_tl_get_load(lq_data, tid);
- if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
- IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
- sta->addr, tid);
- ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
- if (ret == -EAGAIN) {
- /*
- * driver and mac80211 is out of sync
- * this might be cause by reloading firmware
- * stop the tx ba session here
- */
- IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
- tid);
- ieee80211_stop_tx_ba_session(sta, tid);
- }
- } else {
- IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
- "because load = %u\n", tid, load);
+ IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
+ sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+ /*
+ * The driver and mac80211 are out of sync; this might be
+ * caused by reloading the firmware. Stop the Tx BA session
+ * here.
+ */
+ IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
+ tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
}
return ret;
}
@@ -1083,12 +1072,7 @@ done:
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
- if ((priv->tm_fixed_rate) &&
- (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
- rs_program_fix_rate(priv, lq_sta);
-#endif
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+ if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist)
rs_bt_update_lq(priv, ctx, lq_sta);
}
@@ -2913,9 +2897,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
if (sband->band == IEEE80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
- priv->tm_fixed_rate = 0;
-#endif
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->dbg_fixed_rate = 0;
#endif
@@ -3064,11 +3045,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
* overwrite if needed, pass aggregation time limit
* to uCode in uSec
*/
- if (priv && priv->cfg->bt_params &&
- priv->cfg->bt_params->agg_time_limit &&
+ if (priv && priv->lib->bt_params &&
+ priv->lib->bt_params->agg_time_limit &&
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
lq_cmd->agg_params.agg_time_limit =
- cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
+ cpu_to_le16(priv->lib->bt_params->agg_time_limit);
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index a4eed2055fdb..d71776dd1e6a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -335,8 +335,7 @@ static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
if (msecs < 99)
return;
- if (iwlwifi_mod_params.plcp_check &&
- !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
+ if (!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
iwl_force_rf_reset(priv, false);
}
@@ -1102,7 +1101,7 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
iwl_notification_wait_init(&priv->notif_wait);
/* Set up BT Rx handlers */
- if (priv->cfg->bt_params)
+ if (priv->lib->bt_params)
iwlagn_bt_rx_handler_setup(priv);
}
@@ -1120,32 +1119,17 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
*/
iwl_notification_wait_notify(&priv->notif_wait, pkt);
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
- /*
- * RX data may be forwarded to userspace in one
- * of two cases: the user owns the fw through testmode or when
- * the user requested to monitor the rx w/o affecting the regular flow.
- * In these cases the iwl_test object will handle forwarding the rx
- * data to user space.
- * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
- * continues.
- */
- iwl_test_rx(&priv->tst, rxb);
-#endif
-
- if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- priv->rx_handlers_stats[pkt->hdr.cmd]++;
- err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
- iwl_dvm_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * rx_handlers table. See iwl_setup_rx_handlers() */
+ if (priv->rx_handlers[pkt->hdr.cmd]) {
+ priv->rx_handlers_stats[pkt->hdr.cmd]++;
+ err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+ } else {
+ /* No handling needed */
+ IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
+ iwl_dvm_get_cmd_string(pkt->hdr.cmd),
+ pkt->hdr.cmd);
}
return err;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index d69b55866714..8c686a5b90ac 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -801,8 +801,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
* Internal scans are passive, so we can indiscriminately set
* the BT ignore flag on 2.4 GHz since it applies to TX only.
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist)
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist)
scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
break;
case IEEE80211_BAND_5GHZ:
@@ -844,8 +844,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
band = priv->scan_band;
if (band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist) {
/* transmit 2.4 GHz probes only on first antenna */
scan_tx_antennas = first_antenna(scan_tx_antennas);
}
@@ -873,8 +873,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
rx_ant = first_antenna(active_chains);
}
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist &&
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
rx_ant = first_antenna(rx_ant);
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
deleted file mode 100644
index b89b9d9b9969..000000000000
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <net/net_namespace.h>
-#include <linux/netdevice.h>
-#include <net/cfg80211.h>
-#include <net/mac80211.h>
-#include <net/netlink.h>
-
-#include "iwl-debug.h"
-#include "iwl-trans.h"
-#include "dev.h"
-#include "agn.h"
-#include "iwl-test.h"
-#include "iwl-testmode.h"
-
-static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
- struct iwl_host_cmd *cmd)
-{
- struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- return iwl_dvm_send_cmd(priv, cmd);
-}
-
-static bool iwl_testmode_valid_hw_addr(u32 addr)
-{
- if (iwlagn_hw_valid_rtc_data_addr(addr))
- return true;
-
- if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
- addr < IWLAGN_RTC_INST_UPPER_BOUND)
- return true;
-
- return false;
-}
-
-static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
-{
- struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- return priv->fw->ucode_ver;
-}
-
-static struct sk_buff*
-iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
-{
- struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
-}
-
-static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
-{
- return cfg80211_testmode_reply(skb);
-}
-
-static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
- int len)
-{
- struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
- GFP_ATOMIC);
-}
-
-static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
-{
- return cfg80211_testmode_event(skb, GFP_ATOMIC);
-}
-
-static struct iwl_test_ops tst_ops = {
- .send_cmd = iwl_testmode_send_cmd,
- .valid_hw_addr = iwl_testmode_valid_hw_addr,
- .get_fw_ver = iwl_testmode_get_fw_ver,
- .alloc_reply = iwl_testmode_alloc_reply,
- .reply = iwl_testmode_reply,
- .alloc_event = iwl_testmode_alloc_event,
- .event = iwl_testmode_event,
-};
-
-void iwl_testmode_init(struct iwl_priv *priv)
-{
- iwl_test_init(&priv->tst, priv->trans, &tst_ops);
-}
-
-void iwl_testmode_free(struct iwl_priv *priv)
-{
- iwl_test_free(&priv->tst);
-}
-
-static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
-{
- struct iwl_notification_wait calib_wait;
- static const u8 calib_complete[] = {
- CALIBRATION_COMPLETE_NOTIFICATION
- };
- int ret;
-
- iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
- calib_complete, ARRAY_SIZE(calib_complete),
- NULL, NULL);
- ret = iwl_init_alive_start(priv);
- if (ret) {
- IWL_ERR(priv, "Fail init calibration: %d\n", ret);
- goto cfg_init_calib_error;
- }
-
- ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
- if (ret)
- IWL_ERR(priv, "Error detecting"
- " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
- return ret;
-
-cfg_init_calib_error:
- iwl_remove_notification(&priv->notif_wait, &calib_wait);
- return ret;
-}
-
-/*
- * This function handles the user application commands for the driver.
- *
- * It retrieves the command ID carried with IWL_TM_ATTR_COMMAND and calls
- * the respective handler.
- *
- * If it's an unknown command ID, -ENOSYS is replied; otherwise, the return
- * value of the actual command execution is replied to the user application.
- *
- * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
- * is used to carry the message, while IWL_TM_ATTR_COMMAND must be set to
- * IWL_TM_CMD_DEV2APP_SYNC_RSP.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_trans *trans = priv->trans;
- struct sk_buff *skb;
- unsigned char *rsp_data_ptr = NULL;
- int status = 0, rsp_data_len = 0;
- u32 inst_size = 0, data_size = 0;
- const struct fw_img *img;
-
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
- rsp_data_ptr = (unsigned char *)priv->cfg->name;
- rsp_data_len = strlen(priv->cfg->name);
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- rsp_data_len + 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
- IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
- nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
- rsp_data_len, rsp_data_ptr))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
- if (status)
- IWL_ERR(priv, "Error loading init ucode: %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
- iwl_testmode_cfg_init_calib(priv);
- priv->ucode_loaded = false;
- iwl_trans_stop_device(trans);
- break;
-
- case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
- if (status) {
- IWL_ERR(priv,
- "Error loading runtime ucode: %d\n", status);
- break;
- }
- status = iwl_alive_start(priv);
- if (status)
- IWL_ERR(priv,
- "Error starting the device: %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
- iwl_scan_cancel_timeout(priv, 200);
- priv->ucode_loaded = false;
- iwl_trans_stop_device(trans);
- status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
- if (status) {
- IWL_ERR(priv,
- "Error loading WOWLAN ucode: %d\n", status);
- break;
- }
- status = iwl_alive_start(priv);
- if (status)
- IWL_ERR(priv,
- "Error starting the device: %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- if (priv->eeprom_blob) {
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- priv->eeprom_blob_size + 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
- IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
- nla_put(skb, IWL_TM_ATTR_EEPROM,
- priv->eeprom_blob_size,
- priv->eeprom_blob))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n",
- status);
- } else
- return -ENODATA;
- break;
-
- case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
- if (!tb[IWL_TM_ATTR_FIXRATE]) {
- IWL_ERR(priv, "Missing fixrate setting\n");
- return -ENOMSG;
- }
- priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (!priv->ucode_loaded) {
-		IWL_ERR(priv, "uCode has not been loaded\n");
- return -EINVAL;
- } else {
- img = &priv->fw->img[priv->cur_ucode];
- inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
- data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
- nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
- nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- default:
- IWL_ERR(priv, "Unknown testmode driver command ID\n");
- return -ENOSYS;
- }
- return status;
-
-nla_put_failure:
- kfree_skb(skb);
- return -EMSGSIZE;
-}
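
For orientation while reading the dispatch above: the command ID arrives nested under NL80211_ATTR_TESTDATA in an NL80211_CMD_TESTMODE message. Below is a minimal user-space sketch of issuing IWL_TM_CMD_APP2DEV_GET_DEVICENAME, assuming libnl-3 and the IWL_TM_* values from iwl-testmode.h; it is illustrative only and not part of this patch.

/* Illustrative only -- not part of this patch. Sends one
 * IWL_TM_CMD_APP2DEV_GET_DEVICENAME request through nl80211 testmode.
 * Assumes libnl-3 and the IWL_TM_* definitions from iwl-testmode.h;
 * most error handling is trimmed for brevity. */
#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int send_get_devicename(int ifindex)
{
	struct nl_sock *sk;
	struct nl_msg *msg;
	struct nlattr *tm;
	int family, ret;

	sk = nl_socket_alloc();
	if (!sk)
		return -ENOMEM;
	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	msg = nlmsg_alloc();
	if (!msg) {
		nl_socket_free(sk);
		return -ENOMEM;
	}

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

	/* The driver parses the nested attributes against
	 * iwl_testmode_gnl_msg_policy and dispatches on IWL_TM_ATTR_COMMAND. */
	tm = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, IWL_TM_ATTR_COMMAND,
		    IWL_TM_CMD_APP2DEV_GET_DEVICENAME);
	nla_nest_end(msg, tm);

	ret = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return ret < 0 ? ret : 0;
}
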
-
-/*
- * This function handles the user application's request to switch uCode
- * ownership.
- *
- * It retrieves the mandatory field IWL_TM_ATTR_UCODE_OWNER and decides
- * who the current owner of the uCode is.
- *
- * If the current owner is OWNERSHIP_TM, then the only host commands that
- * can be delivered to the uCode are those from testmode; all other host
- * commands will be dropped.
- *
- * By default, the driver is the owner of the uCode in normal operational
- * mode.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- u8 owner;
-
- if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
- IWL_ERR(priv, "Missing ucode owner\n");
- return -ENOMSG;
- }
-
- owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
- if (owner == IWL_OWNERSHIP_DRIVER) {
- priv->ucode_owner = owner;
- iwl_test_enable_notifications(&priv->tst, false);
- } else if (owner == IWL_OWNERSHIP_TM) {
- priv->ucode_owner = owner;
- iwl_test_enable_notifications(&priv->tst, true);
- } else {
- IWL_ERR(priv, "Invalid owner\n");
- return -EINVAL;
- }
- return 0;
-}
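
The ownership flag set here is meant to be honoured by the regular host-command send path. A minimal sketch of such a gate follows; the helper name is hypothetical and the exact check is an assumption, not taken from this patch.

/* Illustrative sketch of a send path honouring uCode ownership; assumes
 * the dvm headers (dev.h/agn.h) are in scope. Not part of this patch. */
static int example_send_cmd_checked(struct iwl_priv *priv,
				    struct iwl_host_cmd *cmd)
{
	/* When testmode owns the uCode, only on-demand (testmode) host
	 * commands may reach the firmware; everything else is dropped. */
	if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
	    !(cmd->flags & CMD_ON_DEMAND))
		return -EIO;

	return iwl_dvm_send_cmd(priv, cmd);
}
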
-
-/* The testmode gnl message handler takes the gnl message from user
- * space, parses it per the policy iwl_testmode_gnl_msg_policy, and then
- * invokes the corresponding handler.
- *
- * This function is invoked when a user space application sends a gnl
- * message through the testmode tunnel NL80211_CMD_TESTMODE regulated
- * by nl80211.
- *
- * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
- * dispatching it to the corresponding handler.
- *
- * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
- * -ENOSYS is replied to the user application if the command is unknown;
- * Otherwise, the command is dispatched to the respective handler.
- *
- * @hw: ieee80211_hw object that represents the device
- * @data: pointer to user space message
- * @len: length in bytes of @data
- */
-int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
-{
- struct nlattr *tb[IWL_TM_ATTR_MAX];
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int result;
-
- result = iwl_test_parse(&priv->tst, tb, data, len);
- if (result)
- return result;
-
-	/* in case multiple accesses to the device happen */
- mutex_lock(&priv->mutex);
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_UCODE:
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
- case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
- case IWL_TM_CMD_APP2DEV_END_TRACE:
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
- case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
- case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
- case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
- result = iwl_test_handle_cmd(&priv->tst, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
- case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
- case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
- case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
- case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
- IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
- result = iwl_testmode_driver(hw, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_OWNERSHIP:
- IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
- result = iwl_testmode_ownership(hw, tb);
- break;
-
- default:
- IWL_ERR(priv, "Unknown testmode command\n");
- result = -ENOSYS;
- break;
- }
- mutex_unlock(&priv->mutex);
-
- if (result)
- IWL_ERR(priv, "Test cmd failed result=%d\n", result);
- return result;
-}
-
-int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct netlink_callback *cb,
- void *data, int len)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int result;
- u32 cmd;
-
- if (cb->args[3]) {
- /* offset by 1 since commands start at 0 */
- cmd = cb->args[3] - 1;
- } else {
- struct nlattr *tb[IWL_TM_ATTR_MAX];
-
- result = iwl_test_parse(&priv->tst, tb, data, len);
- if (result)
- return result;
-
- cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
- cb->args[3] = cmd + 1;
- }
-
-	/* in case multiple accesses to the device happen */
- mutex_lock(&priv->mutex);
- result = iwl_test_dump(&priv->tst, cmd, skb, cb);
- mutex_unlock(&priv->mutex);
- return result;
-}
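
The cmd + 1 stored in cb->args[3] above is the usual netlink dump idiom: a zero slot means "first invocation", so the zero-based command is biased by one. A tiny illustrative sketch of the same idiom (hypothetical helper, not part of the patch):

/* Illustrative only: remember a zero-based command across netlink dump
 * invocations. args[3] == 0 means "first call", so we store cmd + 1. */
static u32 example_dump_remember_cmd(struct netlink_callback *cb,
				     u32 parsed_cmd)
{
	if (!cb->args[3])
		cb->args[3] = parsed_cmd + 1;

	/* subsequent dump passes recover the original zero-based value */
	return cb->args[3] - 1;
}
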
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index 03f9bc01c0cc..fbeee081ee2f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -627,7 +627,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
- if (priv->cfg->base_params->adv_thermal_throttle) {
+ if (priv->lib->adv_thermal_throttle) {
IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
tt->restriction = kcalloc(IWL_TI_STATE_MAX,
sizeof(struct iwl_tt_restriction),
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index a900aaf47790..5ee983faa679 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -83,8 +83,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
else if (info->band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
ieee80211_is_reassoc_req(fc) ||
skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -162,18 +162,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
- if (priv->tm_fixed_rate) {
- /*
- * rate overwrite by testmode
- * we not only send lq command to change rate
- * we also re-enforce per data pkt base.
- */
- tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
- memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
- sizeof(tx_cmd->rate_n_flags));
- }
-#endif
return;
} else if (ieee80211_is_back_req(fc))
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
@@ -202,8 +190,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_flags |= RATE_MCS_CCK_MSK;
/* Set up antennas */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist &&
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
@@ -986,8 +974,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
* notification again.
*/
if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist) {
IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 0a1cdc5e856b..86270b69cd02 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -132,8 +132,8 @@ int iwl_init_alive_start(struct iwl_priv *priv)
{
int ret;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist) {
/*
* Tell uCode we are ready to perform calibration
* need to perform this before any calibration
@@ -155,8 +155,8 @@ int iwl_init_alive_start(struct iwl_priv *priv)
* temperature offset calibration is only needed for runtime ucode,
* so prepare the value now.
*/
- if (priv->cfg->need_temp_offset_calib) {
- if (priv->cfg->temp_offset_v2)
+ if (priv->lib->need_temp_offset_calib) {
+ if (priv->lib->temp_offset_v2)
return iwl_set_temperature_offset_calib_v2(priv);
else
return iwl_set_temperature_offset_calib(priv);
@@ -277,7 +277,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
if (ret)
return ret;
- if (!priv->cfg->no_xtal_calib) {
+ if (!priv->lib->no_xtal_calib) {
ret = iwl_set_Xtal_calib(priv);
if (ret)
return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index c080ae3070b2..0d2afe098afc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -60,9 +60,6 @@ static const struct iwl_base_params iwl1000_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
.led_compensation = 51,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 128,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index a6ddd2f9fba0..c727ec7c90a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -72,14 +72,9 @@ static const struct iwl_base_params iwl2000_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
- .hd_v2 = true,
};
@@ -90,14 +85,9 @@ static const struct iwl_base_params iwl2030_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
- .hd_v2 = true,
};
static const struct iwl_ht_params iwl2000_ht_params = {
@@ -106,16 +96,6 @@ static const struct iwl_ht_params iwl2000_ht_params = {
.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
};
-static const struct iwl_bt_params iwl2030_bt_params = {
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .advanced_bt_coexist = true,
- .agg_time_limit = BT_AGG_THRESHOLD_DEF,
- .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
- .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
- .bt_sco_disable = true,
- .bt_session_2 = true,
-};
-
static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -137,12 +117,10 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.device_family = IWL_DEVICE_FAMILY_2000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
- .need_temp_offset_calib = true, \
- .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl2000_2bgn_cfg = {
@@ -168,12 +146,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2030_base_params, \
- .bt_params = &iwl2030_bt_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
- .need_temp_offset_calib = true, \
- .temp_offset_v2 = true, \
- .led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true
+ .led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -193,10 +167,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
- .need_temp_offset_calib = true, \
- .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true, \
.rx_with_siso_diversity = true
const struct iwl_cfg iwl105_bgn_cfg = {
@@ -222,12 +193,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2030_base_params, \
- .bt_params = &iwl2030_bt_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
- .need_temp_offset_calib = true, \
- .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true, \
.rx_with_siso_diversity = true
const struct iwl_cfg iwl135_bgn_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 403f3f224bf6..ecc01e1a61a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -59,11 +59,8 @@ static const struct iwl_base_params iwl5000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
.led_compensation = 51,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
- .no_idle_support = true,
};
static const struct iwl_ht_params iwl5000_ht_params = {
@@ -159,7 +156,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
- .no_xtal_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index b5ab8d1bcac0..30d45e2fc193 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -82,10 +82,6 @@ static const struct iwl_base_params iwl6000_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
@@ -98,10 +94,6 @@ static const struct iwl_base_params iwl6050_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1500,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
@@ -114,10 +106,6 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
@@ -129,15 +117,6 @@ static const struct iwl_ht_params iwl6000_ht_params = {
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
-static const struct iwl_bt_params iwl6000_bt_params = {
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .advanced_bt_coexist = true,
- .agg_time_limit = BT_AGG_THRESHOLD_DEF,
- .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
- .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
- .bt_sco_disable = true,
-};
-
static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -163,7 +142,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl6005_2agn_cfg = {
@@ -217,11 +195,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.nvm_ver = EEPROM_6030_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
- .bt_params = &iwl6000_bt_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .need_temp_offset_calib = true, \
- .led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true \
+ .led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -256,11 +231,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.nvm_ver = EEPROM_6030_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
- .bt_params = &iwl6000_bt_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .need_temp_offset_calib = true, \
- .led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true
+ .led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 50263e87fe15..22b7fa5b971a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,16 +67,16 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 6
-#define IWL3160_UCODE_API_MAX 6
+#define IWL7260_UCODE_API_MAX 7
+#define IWL3160_UCODE_API_MAX 7
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 6
-#define IWL3160_UCODE_API_OK 6
+#define IWL7260_UCODE_API_OK 7
+#define IWL3160_UCODE_API_OK 7
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 6
-#define IWL3160_UCODE_API_MIN 6
+#define IWL7260_UCODE_API_MIN 7
+#define IWL3160_UCODE_API_MIN 7
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -96,13 +96,9 @@ static const struct iwl_base_params iwl7000_base_params = {
.pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .shadow_reg_enable = true,
};
static const struct iwl_ht_params iwl7000_ht_params = {
@@ -118,14 +114,11 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl7000_base_params, \
- /* TODO: .bt_params? */ \
- .need_temp_offset_calib = true, \
- .led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true \
+ .led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl7260_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC7260",
+ .name = "Intel(R) Dual Band Wireless AC 7260",
.fw_name_pre = IWL7260_FW_PRE,
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
@@ -133,8 +126,44 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
};
-const struct iwl_cfg iwl3160_ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC3160",
+const struct iwl_cfg iwl7260_2n_cfg = {
+ .name = "Intel(R) Dual Band Wireless N 7260",
+ .fw_name_pre = IWL7260_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl7260_n_cfg = {
+ .name = "Intel(R) Wireless N 7260",
+ .fw_name_pre = IWL7260_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 3160",
+ .fw_name_pre = IWL3160_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL3160_NVM_VERSION,
+ .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_2n_cfg = {
+ .name = "Intel(R) Dual Band Wireless N 3160",
+ .fw_name_pre = IWL3160_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL3160_NVM_VERSION,
+ .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_n_cfg = {
+ .name = "Intel(R) Wireless N 3160",
.fw_name_pre = IWL3160_FW_PRE,
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index c38aa8f77554..83b9ff6ff3ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -136,17 +136,9 @@ enum iwl_led_mode {
* @led_compensation: compensate on the led on/off time per HW according
* to the deviation to achieve the desired led frequency.
 * The detailed algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @adv_thermal_throttle: support advance thermal throttle
- * @support_ct_kill_exit: support ct kill exit condition
- * @plcp_delta_threshold: plcp error rate threshold used to trigger
- * radio tuning when there is a high receiving plcp error rate
- * @chain_noise_scale: default chain noise scale used for gain computation
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadow register support
- * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
- * @no_idle_support: do not support idle mode
*/
struct iwl_base_params {
int eeprom_size;
@@ -157,31 +149,9 @@ struct iwl_base_params {
const u16 max_ll_items;
const bool shadow_ram_support;
u16 led_compensation;
- bool adv_thermal_throttle;
- bool support_ct_kill_exit;
- u8 plcp_delta_threshold;
- s32 chain_noise_scale;
unsigned int wd_timeout;
u32 max_event_log_size;
const bool shadow_reg_enable;
- const bool hd_v2;
- const bool no_idle_support;
-};
-
-/*
- * @advanced_bt_coexist: support advanced bt coexist
- * @bt_init_traffic_load: specify initial bt traffic load
- * @bt_prio_boost: default bt priority boost value
- * @agg_time_limit: maximum number of uSec in aggregation
- * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
- */
-struct iwl_bt_params {
- bool advanced_bt_coexist;
- u8 bt_init_traffic_load;
- u32 bt_prio_boost;
- u16 agg_time_limit;
- bool bt_sco_disable;
- bool bt_session_2;
};
/*
@@ -231,16 +201,10 @@ struct iwl_eeprom_params {
* @nvm_calib_ver: NVM calibration version
* @lib: pointer to the lib ops
* @base_params: pointer to basic parameters
- * @ht_params: point to ht patameters
- * @bt_params: pointer to bt parameters
- * @need_temp_offset_calib: need to perform temperature offset calibration
- * @no_xtal_calib: some devices do not need crystal calibration data,
- * don't send it to those
+ * @ht_params: pointer to ht parameters
* @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- * @adv_pm: advance power management
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
- * @temp_offset_v2: support v2 of temperature offset calibration
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -258,26 +222,23 @@ struct iwl_cfg {
const u32 max_inst_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
+ bool bt_shared_single_ant;
u16 nvm_ver;
u16 nvm_calib_ver;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
const struct iwl_ht_params *ht_params;
- const struct iwl_bt_params *bt_params;
const struct iwl_eeprom_params *eeprom_params;
- const bool need_temp_offset_calib; /* if used set to true */
- const bool no_xtal_calib;
enum iwl_led_mode led_mode;
- const bool adv_pm;
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
- const bool temp_offset_v2;
};
/*
* This list declares the config structures for all devices.
*/
+#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
extern const struct iwl_cfg iwl5350_agn_cfg;
@@ -319,7 +280,14 @@ extern const struct iwl_cfg iwl6035_2agn_cfg;
extern const struct iwl_cfg iwl105_bgn_cfg;
extern const struct iwl_cfg iwl105_bgn_d_cfg;
extern const struct iwl_cfg iwl135_bgn_cfg;
+#endif /* CONFIG_IWLDVM */
+#if IS_ENABLED(CONFIG_IWLMVM)
extern const struct iwl_cfg iwl7260_2ac_cfg;
-extern const struct iwl_cfg iwl3160_ac_cfg;
+extern const struct iwl_cfg iwl7260_2n_cfg;
+extern const struct iwl_cfg iwl7260_n_cfg;
+extern const struct iwl_cfg iwl3160_2ac_cfg;
+extern const struct iwl_cfg iwl3160_2n_cfg;
+extern const struct iwl_cfg iwl3160_n_cfg;
+#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 20e845d4da04..a276af476e2d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -472,4 +472,23 @@
#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+/*****************************************************************************
+ * 7000/3000 series SHR DTS addresses *
+ *****************************************************************************/
+
+/* Diode Results Register Structure: */
+enum dtd_diode_reg {
+ DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */
+ DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */
+ DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */
+ DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */
+ DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */
+ DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */
+/* Those are the masks INSIDE the flags bit-field: */
+ DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0,
+ DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */
+ DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7,
+ DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */
+};
+
#endif /* !__iwl_csr_h__ */
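
A minimal sketch of how the diode-results masks added above could be used to pull the individual fields out of a raw register value; the helpers are hypothetical and not part of the patch.

/* Illustrative decode of a raw DTS diode results register value using the
 * masks defined above; these helpers are not part of this patch. */
static inline u8 example_dts_digital_value(u32 reg)
{
	return reg & DTS_DIODE_REG_DIG_VAL;		/* bits [7:0] */
}

static inline u8 example_dts_vrefs_id(u32 reg)
{
	/* isolate the flags byte, bits [31:24] */
	u32 flags = (reg & DTS_DIODE_REG_FLAGS_MSK) >> 24;

	/* then pick the VREFS ID out of the flags bit-field */
	return (flags & DTS_DIODE_REG_FLAGS_VREFS_ID) >>
	       DTS_DIODE_REG_FLAGS_VREFS_ID_POS;
}
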
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 8cf5db7fb5c9..7edb8519c8a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -34,7 +34,11 @@
static inline bool iwl_have_debug_level(u32 level)
{
+#ifdef CONFIG_IWLWIFI_DEBUG
return iwlwifi_mod_params.debug_level & level;
+#else
+ return false;
+#endif
}
void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 40fed1f511e2..d0162d426f88 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1111,11 +1111,8 @@ void iwl_drv_stop(struct iwl_drv *drv)
/* shared module parameters */
struct iwl_mod_params iwlwifi_mod_params = {
.restart_fw = true,
- .plcp_check = true,
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
- .bt_ch_announce = true,
- .auto_agg = true,
.wd_disable = true,
/* the rest are 0 by default */
};
@@ -1223,19 +1220,14 @@ module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
MODULE_PARM_DESC(antenna_coupling,
"specify antenna coupling in dB (defualt: 0 dB)");
-module_param_named(bt_ch_inhibition, iwlwifi_mod_params.bt_ch_announce,
- bool, S_IRUGO);
-MODULE_PARM_DESC(bt_ch_inhibition,
- "Enable BT channel inhibition (default: enable)");
-
-module_param_named(plcp_check, iwlwifi_mod_params.plcp_check, bool, S_IRUGO);
-MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
-
module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
"Disable stuck queue watchdog timer 0=system default, "
"1=disable, 2=enable (default: 0)");
+module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
+MODULE_PARM_DESC(nvm_file, "NVM file name");
+
/*
* set bt_coex_active to true, uCode will do kill/defer
* every time the priority line is asserted (BT is sending signals on the
@@ -1269,8 +1261,3 @@ module_param_named(power_level, iwlwifi_mod_params.power_level,
int, S_IRUGO);
MODULE_PARM_DESC(power_level,
"default power save level (range from 1 - 5, default: 1)");
-
-module_param_named(auto_agg, iwlwifi_mod_params.auto_agg,
- bool, S_IRUGO);
-MODULE_PARM_DESC(auto_agg,
- "enable agg w/o check traffic load (default: enable)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 7d1450916308..429337a2b9a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -62,8 +62,7 @@
#ifndef __iwl_drv_h__
#define __iwl_drv_h__
-
-#include <linux/module.h>
+#include <linux/export.h>
/* for all modules */
#define DRV_NAME "iwlwifi"
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 600c9fdd7f71..4c887f365908 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -732,17 +732,16 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
+ enum ieee80211_band band,
+ u8 tx_chains, u8 rx_chains)
{
int max_bit_rate = 0;
- u8 rx_chains;
- u8 tx_chains;
- tx_chains = hweight8(data->valid_tx_ant);
+ tx_chains = hweight8(tx_chains);
if (cfg->rx_with_siso_diversity)
rx_chains = 1;
else
- rx_chains = hweight8(data->valid_rx_ant);
+ rx_chains = hweight8(rx_chains);
if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
ht_info->ht_supported = false;
@@ -806,7 +805,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
sband->n_bitrates = N_RATES_24;
n_used += iwl_init_sband_channels(data, sband, n_channels,
IEEE80211_BAND_2GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+ data->valid_tx_ant, data->valid_rx_ant);
sband = &data->bands[IEEE80211_BAND_5GHZ];
sband->band = IEEE80211_BAND_5GHZ;
@@ -814,7 +814,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
sband->n_bitrates = N_RATES_52;
n_used += iwl_init_sband_channels(data, sband, n_channels,
IEEE80211_BAND_5GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+ data->valid_tx_ant, data->valid_rx_ant);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 37f115390b19..d73304a23ec2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -133,6 +133,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band);
+ enum ieee80211_band band,
+ u8 tx_chains, u8 rx_chains);
#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index c4c446d41eb0..f844d5c748c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -106,11 +106,14 @@ enum iwl_ucode_type {
/*
* enumeration of ucode section.
- * This enumeration is used for legacy tlv style (before 16.0 uCode).
+ * This enumeration is used directly for older firmware (before 16.0).
+ * For new firmware, there can be up to 4 sections (see below) but the
+ * first one packaged into the firmware file is the DATA section and
+ * some debugging code accesses that.
*/
enum iwl_ucode_sec {
- IWL_UCODE_SECTION_INST,
IWL_UCODE_SECTION_DATA,
+ IWL_UCODE_SECTION_INST,
};
/*
* For 16.0 uCode and above, there is no differentiation between sections,
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index d6f6c37c09fd..a1f580c0c6c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -93,7 +93,6 @@ enum iwl_power_level {
* use IWL_DISABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 0
* @restart_fw: restart firmware, default = 1
- * @plcp_check: enable plcp health check, default = true
* @wd_disable: enable stuck queue check, default = 0
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
@@ -101,24 +100,22 @@ enum iwl_power_level {
* @power_level: power level, default = 1
* @debug_level: levels are IWL_DL_*
* @ant_coupling: antenna coupling in dB, default = 0
- * @bt_ch_announce: BT channel inhibition, default = enable
- * @auto_agg: enable agg. without check, default = true
*/
struct iwl_mod_params {
int sw_crypto;
unsigned int disable_11n;
int amsdu_size_8K;
bool restart_fw;
- bool plcp_check;
int wd_disable;
bool bt_coex_active;
int led_mode;
bool power_save;
int power_level;
+#ifdef CONFIG_IWLWIFI_DEBUG
u32 debug_level;
+#endif
int ant_coupling;
- bool bt_ch_announce;
- bool auto_agg;
+ char *nvm_file;
};
#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 6199a0a597a6..acd2665afb8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -89,6 +89,7 @@ enum nvm_sku_bits {
NVM_SKU_CAP_BAND_24GHZ = BIT(0),
NVM_SKU_CAP_BAND_52GHZ = BIT(1),
NVM_SKU_CAP_11N_ENABLE = BIT(2),
+ NVM_SKU_CAP_11AC_ENABLE = BIT(3),
};
/* radio config bits (actual values from NVM definition) */
@@ -258,8 +259,6 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_vht_cap *vht_cap)
{
- /* For now, assume new devices with NVM are VHT capable */
-
vht_cap->vht_supported = true;
vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
@@ -292,7 +291,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
}
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data, const __le16 *nvm_sw)
+ struct iwl_nvm_data *data, const __le16 *nvm_sw,
+ bool enable_vht, u8 tx_chains, u8 rx_chains)
{
int n_channels = iwl_init_channel_map(dev, cfg, data,
&nvm_sw[NVM_CHANNELS]);
@@ -305,7 +305,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
sband->n_bitrates = N_RATES_24;
n_used += iwl_init_sband_channels(data, sband, n_channels,
IEEE80211_BAND_2GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+ tx_chains, rx_chains);
sband = &data->bands[IEEE80211_BAND_5GHZ];
sband->band = IEEE80211_BAND_5GHZ;
@@ -313,8 +314,10 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
sband->n_bitrates = N_RATES_52;
n_used += iwl_init_sband_channels(data, sband, n_channels,
IEEE80211_BAND_5GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
- iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+ tx_chains, rx_chains);
+ if (enable_vht)
+ iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
@@ -324,7 +327,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
- const __le16 *nvm_calib)
+ const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains)
{
struct iwl_nvm_data *data;
u8 hw_addr[ETH_ALEN];
@@ -380,7 +383,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
data->hw_addr[4] = hw_addr[5];
data->hw_addr[5] = hw_addr[4];
- iwl_init_sbands(dev, cfg, data, nvm_sw);
+ iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
+ tx_chains, rx_chains);
data->calib_version = 255; /* TODO:
this value will prevent some checks from
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index e57fb989661e..3325059c52d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -75,6 +75,6 @@
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
- const __le16 *nvm_calib);
+ const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains);
#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 25745daa0d5d..1a405ae6a9c5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -92,20 +92,16 @@ struct iwl_phy_db_entry {
struct iwl_phy_db {
struct iwl_phy_db_entry cfg;
struct iwl_phy_db_entry calib_nch;
- struct iwl_phy_db_entry calib_ch;
struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
- u32 channel_num;
- u32 channel_size;
-
struct iwl_trans *trans;
};
enum iwl_phy_db_section_type {
IWL_PHY_DB_CFG = 1,
IWL_PHY_DB_CALIB_NCH,
- IWL_PHY_DB_CALIB_CH,
+ IWL_PHY_DB_UNUSED,
IWL_PHY_DB_CALIB_CHG_PAPD,
IWL_PHY_DB_CALIB_CHG_TXP,
IWL_PHY_DB_MAX
@@ -169,8 +165,6 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
return &phy_db->cfg;
case IWL_PHY_DB_CALIB_NCH:
return &phy_db->calib_nch;
- case IWL_PHY_DB_CALIB_CH:
- return &phy_db->calib_ch;
case IWL_PHY_DB_CALIB_CHG_PAPD:
if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
return NULL;
@@ -208,7 +202,6 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
@@ -248,13 +241,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
entry->size = size;
- if (type == IWL_PHY_DB_CALIB_CH) {
- phy_db->channel_num =
- le32_to_cpup((__le32 *)phy_db_notif->data);
- phy_db->channel_size =
- (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
- }
-
IWL_DEBUG_INFO(phy_db->trans,
"%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
__func__, __LINE__, type, size);
@@ -328,10 +314,7 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
u32 type, u8 **data, u16 *size, u16 ch_id)
{
struct iwl_phy_db_entry *entry;
- u32 channel_num;
- u32 channel_size;
u16 ch_group_id = 0;
- u16 index;
if (!phy_db)
return -EINVAL;
@@ -346,21 +329,8 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
if (!entry)
return -EINVAL;
- if (type == IWL_PHY_DB_CALIB_CH) {
- index = ch_id_to_ch_index(ch_id);
- channel_num = phy_db->channel_num;
- channel_size = phy_db->channel_size;
- if (index >= channel_num) {
- IWL_ERR(phy_db->trans, "Wrong channel number %d\n",
- ch_id);
- return -EINVAL;
- }
- *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
- *size = channel_size;
- } else {
- *data = entry->data;
- *size = entry->size;
- }
+ *data = entry->data;
+ *size = entry->size;
IWL_DEBUG_INFO(phy_db->trans,
"%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
@@ -413,6 +383,9 @@ static int iwl_phy_db_send_all_channel_groups(
if (!entry)
return -EINVAL;
+ if (WARN_ON_ONCE(!entry->size))
+ continue;
+
/* Send the requested PHY DB section */
err = iwl_send_phy_db_cmd(phy_db,
type,
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 386f2a7c87cb..a70c7b9d9bad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,9 +97,23 @@
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+#define APMG_RTC_INT_STT_RFKILL (0x10000000)
+
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C
+/*****************************************************************************
+ * 7000/3000 series SHR DTS addresses *
+ *****************************************************************************/
+
+#define SHR_MISC_WFM_DTS_EN (0x00a10024)
+#define DTSC_CFG_MODE (0x00a10604)
+#define DTSC_VREF_AVG (0x00a10648)
+#define DTSC_VREF5_AVG (0x00a1064c)
+#define DTSC_CFG_MODE_PERIODIC (0x2)
+#define DTSC_PTAT_AVG (0x00a10650)
+
+
/**
* Tx Scheduler
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
deleted file mode 100644
index 5cfd55b86ed3..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-test.c
+++ /dev/null
@@ -1,852 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/export.h>
-#include <net/netlink.h>
-
-#include "iwl-drv.h"
-#include "iwl-io.h"
-#include "iwl-fh.h"
-#include "iwl-prph.h"
-#include "iwl-trans.h"
-#include "iwl-test.h"
-#include "iwl-csr.h"
-#include "iwl-testmode.h"
-
-/*
- * Periphery registers absolute lower bound. This is used in order to
- * differentiate register access through HBUS_TARG_PRPH_* and
- * HBUS_TARG_MEM_* accesses.
- */
-#define IWL_ABS_PRPH_START (0xA00000)
-
-/*
- * The TLVs used in the gnl message policy between the kernel module and
- * the user space application. Messages built against
- * iwl_testmode_gnl_msg_policy are carried through the NL80211_CMD_TESTMODE
- * channel regulated by nl80211. See iwl-testmode.h
- */
-static
-struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
- [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
- [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
- [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
- [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
-
- [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
- [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
- [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
- [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
-};
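
For reference, a policy like the one above is normally applied with nla_parse() before dispatching on IWL_TM_ATTR_COMMAND. A minimal sketch, assuming the five-argument nla_parse() of this kernel era; the helper itself is illustrative and not part of the patch.

/* Illustrative sketch of validating a raw testmode payload against the
 * policy above; not part of this patch. */
static int example_parse_testmode(struct nlattr **tb, void *data, int len)
{
	int ret;

	ret = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
			iwl_testmode_gnl_msg_policy);
	if (ret)
		return ret;

	/* The command attribute is mandatory for every testmode message. */
	if (!tb[IWL_TM_ATTR_COMMAND])
		return -ENOMSG;

	return 0;
}
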
-
-static inline void iwl_test_trace_clear(struct iwl_test *tst)
-{
- memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
-}
-
-static void iwl_test_trace_stop(struct iwl_test *tst)
-{
- if (!tst->trace.enabled)
- return;
-
- if (tst->trace.cpu_addr && tst->trace.dma_addr)
- dma_free_coherent(tst->trans->dev,
- tst->trace.tsize,
- tst->trace.cpu_addr,
- tst->trace.dma_addr);
-
- iwl_test_trace_clear(tst);
-}
-
-static inline void iwl_test_mem_clear(struct iwl_test *tst)
-{
- memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
-}
-
-static inline void iwl_test_mem_stop(struct iwl_test *tst)
-{
- if (!tst->mem.in_read)
- return;
-
- iwl_test_mem_clear(tst);
-}
-
-/*
- * Initializes the test object
- * During the lifetime of the test object it is assumed that the transport is
- * started. The test object should be stopped before the transport is stopped.
- */
-void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
- struct iwl_test_ops *ops)
-{
- tst->trans = trans;
- tst->ops = ops;
-
- iwl_test_trace_clear(tst);
- iwl_test_mem_clear(tst);
-}
-EXPORT_SYMBOL_GPL(iwl_test_init);
-
-/*
- * Stop the test object
- */
-void iwl_test_free(struct iwl_test *tst)
-{
- iwl_test_mem_stop(tst);
- iwl_test_trace_stop(tst);
-}
-EXPORT_SYMBOL_GPL(iwl_test_free);
-
-static inline int iwl_test_send_cmd(struct iwl_test *tst,
- struct iwl_host_cmd *cmd)
-{
- return tst->ops->send_cmd(tst->trans->op_mode, cmd);
-}
-
-static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
-{
- return tst->ops->valid_hw_addr(addr);
-}
-
-static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
-{
- return tst->ops->get_fw_ver(tst->trans->op_mode);
-}
-
-static inline struct sk_buff*
-iwl_test_alloc_reply(struct iwl_test *tst, int len)
-{
- return tst->ops->alloc_reply(tst->trans->op_mode, len);
-}
-
-static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
-{
- return tst->ops->reply(tst->trans->op_mode, skb);
-}
-
-static inline struct sk_buff*
-iwl_test_alloc_event(struct iwl_test *tst, int len)
-{
- return tst->ops->alloc_event(tst->trans->op_mode, len);
-}
-
-static inline void
-iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
-{
- return tst->ops->event(tst->trans->op_mode, skb);
-}
-
-/*
- * This function handles the user application commands to the fw. The fw
- * commands are sent in a synchronous manner. If the user requested the
- * command response, it is sent back to the user.
- */
-static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
-{
- struct iwl_host_cmd cmd;
- struct iwl_rx_packet *pkt;
- struct sk_buff *skb;
- void *reply_buf;
- u32 reply_len;
- int ret;
- bool cmd_want_skb;
-
- memset(&cmd, 0, sizeof(struct iwl_host_cmd));
-
- if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
- !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
- IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
- return -ENOMSG;
- }
-
- cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
- cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
- if (cmd_want_skb)
- cmd.flags |= CMD_WANT_SKB;
-
- cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
- cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
- cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
- cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
- cmd.id, cmd.flags, cmd.len[0]);
-
- ret = iwl_test_send_cmd(tst, &cmd);
- if (ret) {
- IWL_ERR(tst->trans, "Failed to send hcmd\n");
- return ret;
- }
- if (!cmd_want_skb)
- return ret;
-
- /* Handling return of SKB to the user */
- pkt = cmd.resp_pkt;
- if (!pkt) {
- IWL_ERR(tst->trans, "HCMD received a null response packet\n");
- return ret;
- }
-
- reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- skb = iwl_test_alloc_reply(tst, reply_len + 20);
- reply_buf = kmemdup(&pkt->hdr, reply_len, GFP_KERNEL);
- if (!skb || !reply_buf) {
- kfree_skb(skb);
- kfree(reply_buf);
- return -ENOMEM;
- }
-
-	/* The reply is in a page, which we cannot send to user space. */
- iwl_free_resp(&cmd);
-
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
- IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
- nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
- goto nla_put_failure;
- return iwl_test_reply(tst, skb);
-
-nla_put_failure:
- IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
- kfree(reply_buf);
- kfree_skb(skb);
- return -ENOMSG;
-}
-
-/*
- * Handles the user application commands for register access.
- */
-static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
-{
- u32 ofs, val32, cmd;
- u8 val8;
- struct sk_buff *skb;
- int status = 0;
- struct iwl_trans *trans = tst->trans;
-
- if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
- IWL_ERR(trans, "Missing reg offset\n");
- return -ENOMSG;
- }
-
- ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
- IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
-
- cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
-
- /*
- * Allow access only to FH/CSR/HBUS in direct mode.
- * Since we don't have the upper bounds for the CSR and HBUS segments,
- * we will use only the upper bound of FH for sanity check.
- */
- if (ofs >= FH_MEM_UPPER_BOUND) {
- IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
- FH_MEM_UPPER_BOUND);
- return -EINVAL;
- }
-
- switch (cmd) {
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
- val32 = iwl_read_direct32(tst->trans, ofs);
- IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
-
- skb = iwl_test_alloc_reply(tst, 20);
- if (!skb) {
- IWL_ERR(trans, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
- goto nla_put_failure;
- status = iwl_test_reply(tst, skb);
- if (status < 0)
- IWL_ERR(trans, "Error sending msg : %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
- if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
- IWL_ERR(trans, "Missing value to write\n");
- return -ENOMSG;
- } else {
- val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
- IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
- iwl_write_direct32(tst->trans, ofs, val32);
- }
- break;
-
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
- if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
- IWL_ERR(trans, "Missing value to write\n");
- return -ENOMSG;
- } else {
- val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
- IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
- iwl_write8(tst->trans, ofs, val8);
- }
- break;
-
- default:
- IWL_ERR(trans, "Unknown test register cmd ID\n");
- return -ENOMSG;
- }
-
- return status;
-
-nla_put_failure:
- kfree_skb(skb);
- return -EMSGSIZE;
-}
-
-/*
- * Handles the request to start FW tracing. Allocates the trace buffer
- * and sends a reply to user space with the address of the allocated buffer.
- */
-static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
-{
- struct sk_buff *skb;
- int status = 0;
-
- if (tst->trace.enabled)
- return -EBUSY;
-
- if (!tb[IWL_TM_ATTR_TRACE_SIZE])
- tst->trace.size = TRACE_BUFF_SIZE_DEF;
- else
- tst->trace.size =
- nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
-
- if (!tst->trace.size)
- return -EINVAL;
-
- if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
- tst->trace.size > TRACE_BUFF_SIZE_MAX)
- return -EINVAL;
-
- tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
- tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
- tst->trace.tsize,
- &tst->trace.dma_addr,
- GFP_KERNEL);
- if (!tst->trace.cpu_addr)
- return -ENOMEM;
-
- tst->trace.enabled = true;
- tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
-
- memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
-
- skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
- if (!skb) {
- IWL_ERR(tst->trans, "Memory allocation fail\n");
- iwl_test_trace_stop(tst);
- return -ENOMEM;
- }
-
- if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
- sizeof(tst->trace.dma_addr),
- (u64 *)&tst->trace.dma_addr))
- goto nla_put_failure;
-
- status = iwl_test_reply(tst, skb);
- if (status < 0)
- IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
-
- tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
- DUMP_CHUNK_SIZE);
-
- return status;
-
-nla_put_failure:
- kfree_skb(skb);
- if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
- IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
- iwl_test_trace_stop(tst);
- return -EMSGSIZE;
-}
-
-/*
- * Handles indirect read from the periphery or the SRAM. The read is performed
- * into a temporary buffer; the user space application should later issue a
- * dump command to retrieve the data.
- */
-static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
-{
- struct iwl_trans *trans = tst->trans;
- unsigned long flags;
- int i;
-
- if (size & 0x3)
- return -EINVAL;
-
- tst->mem.size = size;
- tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
- if (tst->mem.addr == NULL)
- return -ENOMEM;
-
- /* Hard-coded periphery absolute address */
- if (IWL_ABS_PRPH_START <= addr &&
- addr < IWL_ABS_PRPH_START + PRPH_END) {
- if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
- return -EIO;
- }
- iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
- addr | (3 << 24));
- for (i = 0; i < size; i += 4)
- *(u32 *)(tst->mem.addr + i) =
- iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
- iwl_trans_release_nic_access(trans, &flags);
- } else { /* target memory (SRAM) */
- iwl_trans_read_mem(trans, addr, tst->mem.addr,
- tst->mem.size / 4);
- }
-
- tst->mem.nchunks =
- DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
- tst->mem.in_read = true;
- return 0;
-
-}
-
-/*
- * Handles indirect write to the periphery or SRAM. The data to write is
- * taken from the buffer supplied by user space.
- */
-static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
- u32 size, unsigned char *buf)
-{
- struct iwl_trans *trans = tst->trans;
- u32 val, i;
- unsigned long flags;
-
- if (IWL_ABS_PRPH_START <= addr &&
- addr < IWL_ABS_PRPH_START + PRPH_END) {
- /* Periphery writes can be 1-3 bytes long, or DWORDs */
- if (size < 4) {
- memcpy(&val, buf, size);
- if (!iwl_trans_grab_nic_access(trans, false, &flags))
- return -EIO;
- iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
- (addr & 0x0000FFFF) |
- ((size - 1) << 24));
- iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
- iwl_trans_release_nic_access(trans, &flags);
- } else {
- if (size % 4)
- return -EINVAL;
- for (i = 0; i < size; i += 4)
- iwl_write_prph(trans, addr+i,
- *(u32 *)(buf+i));
- }
- } else if (iwl_test_valid_hw_addr(tst, addr)) {
- iwl_trans_write_mem(trans, addr, buf, size / 4);
- } else {
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * Handles the user application commands for indirect read/write
- * to/from the periphery or the SRAM.
- */
-static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
-{
- u32 addr, size, cmd;
- unsigned char *buf;
-
- /* Both read and write should be blocked, for atomicity */
- if (tst->mem.in_read)
- return -EBUSY;
-
- cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
- if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
- IWL_ERR(tst->trans, "Error finding memory offset address\n");
- return -ENOMSG;
- }
- addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
- if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
- IWL_ERR(tst->trans, "Error finding size for memory reading\n");
- return -ENOMSG;
- }
- size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
-
- if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
- return iwl_test_indirect_read(tst, addr, size);
- } else {
- if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
- return -EINVAL;
- buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
- return iwl_test_indirect_write(tst, addr, size, buf);
- }
-}
-
-/*
- * Enable notifications to user space
- */
-static int iwl_test_notifications(struct iwl_test *tst,
- struct nlattr **tb)
-{
- tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
- return 0;
-}
-
-/*
- * Handles the request to get the device id
- */
-static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
-{
- u32 devid = tst->trans->hw_id;
- struct sk_buff *skb;
- int status;
-
- IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
-
- skb = iwl_test_alloc_reply(tst, 20);
- if (!skb) {
- IWL_ERR(tst->trans, "Memory allocation fail\n");
- return -ENOMEM;
- }
-
- if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
- goto nla_put_failure;
- status = iwl_test_reply(tst, skb);
- if (status < 0)
- IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
-
- return 0;
-
-nla_put_failure:
- kfree_skb(skb);
- return -EMSGSIZE;
-}
-
-/*
- * Handles the request to get the FW version
- */
-static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
-{
- struct sk_buff *skb;
- int status;
- u32 ver = iwl_test_fw_ver(tst);
-
- IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
-
- skb = iwl_test_alloc_reply(tst, 20);
- if (!skb) {
- IWL_ERR(tst->trans, "Memory allocation fail\n");
- return -ENOMEM;
- }
-
- if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
- goto nla_put_failure;
-
- status = iwl_test_reply(tst, skb);
- if (status < 0)
- IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
-
- return 0;
-
-nla_put_failure:
- kfree_skb(skb);
- return -EMSGSIZE;
-}
-
-/*
- * Parse the netlink message and validate that IWL_TM_ATTR_COMMAND exists
- */
-int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
- void *data, int len)
-{
- int result;
-
- result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
- iwl_testmode_gnl_msg_policy);
- if (result) {
- IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
- return result;
- }
-
- /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
- if (!tb[IWL_TM_ATTR_COMMAND]) {
- IWL_ERR(tst->trans, "Missing testmode command type\n");
- return -ENOMSG;
- }
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_test_parse);
-
-/*
- * Handle test commands.
- * Returns 1 for unknown commands (not handled by the test object); negative
- * value in case of error.
- */
-int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
-{
- int result;
-
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_UCODE:
- IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
- result = iwl_test_fw_cmd(tst, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
- IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
- result = iwl_test_reg(tst, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
- IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
- result = iwl_test_trace_begin(tst, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_END_TRACE:
- iwl_test_trace_stop(tst);
- result = 0;
- break;
-
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
- IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
- result = iwl_test_indirect_mem(tst, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
- IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
- result = iwl_test_notifications(tst, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
- IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
- result = iwl_test_get_fw_ver(tst, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
- IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
- result = iwl_test_get_dev_id(tst, tb);
- break;
-
- default:
- IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
- result = 1;
- break;
- }
- return result;
-}
-IWL_EXPORT_SYMBOL(iwl_test_handle_cmd);
-
-static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- int idx, length;
-
- if (!tst->trace.enabled || !tst->trace.trace_addr)
- return -EFAULT;
-
- idx = cb->args[4];
- if (idx >= tst->trace.nchunks)
- return -ENOENT;
-
- length = DUMP_CHUNK_SIZE;
- if (((idx + 1) == tst->trace.nchunks) &&
- (tst->trace.size % DUMP_CHUNK_SIZE))
- length = tst->trace.size %
- DUMP_CHUNK_SIZE;
-
- if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
- tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
- goto nla_put_failure;
-
- cb->args[4] = ++idx;
- return 0;
-
- nla_put_failure:
- return -ENOBUFS;
-}
-
-static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- int idx, length;
-
- if (!tst->mem.in_read)
- return -EFAULT;
-
- idx = cb->args[4];
- if (idx >= tst->mem.nchunks) {
- iwl_test_mem_stop(tst);
- return -ENOENT;
- }
-
- length = DUMP_CHUNK_SIZE;
- if (((idx + 1) == tst->mem.nchunks) &&
- (tst->mem.size % DUMP_CHUNK_SIZE))
- length = tst->mem.size % DUMP_CHUNK_SIZE;
-
- if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
- tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
- goto nla_put_failure;
-
- cb->args[4] = ++idx;
- return 0;
-
- nla_put_failure:
- return -ENOBUFS;
-}
-
-/*
- * Handle dump commands.
- * Returns 1 for unknown commands (not handled by the test object); negative
- * value in case of error.
- */
-int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- int result;
-
- switch (cmd) {
- case IWL_TM_CMD_APP2DEV_READ_TRACE:
- IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
- result = iwl_test_trace_dump(tst, skb, cb);
- break;
-
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
- IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
- result = iwl_test_buffer_dump(tst, skb, cb);
- break;
-
- default:
- result = 1;
- break;
- }
- return result;
-}
-IWL_EXPORT_SYMBOL(iwl_test_dump);
-
-/*
- * Multicasts spontaneous messages from the device to user space.
- */
-static void iwl_test_send_rx(struct iwl_test *tst,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct sk_buff *skb;
- struct iwl_rx_packet *data;
- int length;
-
- data = rxb_addr(rxb);
- length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-
- /* the length doesn't include len_n_flags field, so add it manually */
- length += sizeof(__le32);
-
- skb = iwl_test_alloc_event(tst, length + 20);
- if (skb == NULL) {
- IWL_ERR(tst->trans, "Out of memory for message to user\n");
- return;
- }
-
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
- IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
- nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
- goto nla_put_failure;
-
- iwl_test_event(tst, skb);
- return;
-
-nla_put_failure:
- kfree_skb(skb);
- IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
-}
-
-/*
- * Called whenever an Rx frame is received from the device. If notifications to
- * user space are requested, the frame is forwarded to the user.
- */
-void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
-{
- if (tst->notify)
- iwl_test_send_rx(tst, rxb);
-}
-IWL_EXPORT_SYMBOL(iwl_test_rx);
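
The dump handlers removed above (iwl_test_trace_dump() and iwl_test_buffer_dump()) both stream a buffer back to user space in DUMP_CHUNK_SIZE pieces, with only the final chunk shortened to the remainder. A minimal standalone sketch of that chunk accounting, in plain C with a hypothetical chunk size (illustration only, not driver code):

#include <stdio.h>

#define DUMP_CHUNK_SIZE 3072	/* hypothetical; the driver derives it from PAGE_SIZE */

/* length of chunk `idx` when `size` bytes are split into `nchunks` pieces */
static unsigned int chunk_len(unsigned int size, unsigned int idx,
			      unsigned int nchunks)
{
	if (idx + 1 == nchunks && size % DUMP_CHUNK_SIZE)
		return size % DUMP_CHUNK_SIZE;	/* last, partial chunk */
	return DUMP_CHUNK_SIZE;			/* full-sized chunk */
}

int main(void)
{
	unsigned int size = 10000;	/* pretend trace/memory buffer size */
	unsigned int nchunks = (size + DUMP_CHUNK_SIZE - 1) / DUMP_CHUNK_SIZE;
	unsigned int idx;

	for (idx = 0; idx < nchunks; idx++)
		printf("chunk %u: %u bytes\n", idx, chunk_len(size, idx, nchunks));
	return 0;
}
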
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
deleted file mode 100644
index 8fbd21704840..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-test.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#ifndef __IWL_TEST_H__
-#define __IWL_TEST_H__
-
-#include <linux/types.h>
-#include "iwl-trans.h"
-
-struct iwl_test_trace {
- u32 size;
- u32 tsize;
- u32 nchunks;
- u8 *cpu_addr;
- u8 *trace_addr;
- dma_addr_t dma_addr;
- bool enabled;
-};
-
-struct iwl_test_mem {
- u32 size;
- u32 nchunks;
- u8 *addr;
- bool in_read;
-};
-
-/*
- * struct iwl_test_ops: callback to the op mode
- *
- * The structure defines the callbacks that the op_mode should handle,
- * in order to handle logic that is outside the scope of iwl_test. The
- * op_mode must set all the callbacks.
- *
- * @send_cmd: handler that is used by the test object to request the
- * op_mode to send a command to the fw.
- *
- * @valid_hw_addr: handler that is used by the test object to request the
- * op_mode to check if the given address is a valid address.
- *
- * @get_fw_ver: handler used to get the FW version.
- *
- * @alloc_reply: handler used by the test object to request the op_mode
- * to allocate an skb for sending a reply to the user, and initialize
- * the skb. It is assumed that the test object only fills the required
- * attributes.
- *
- * @reply: handler used by the test object to request the op_mode to reply
- * to a request. The skb is an skb previously allocated by the
- * alloc_reply callback.
- *
- * @alloc_event: handler used by the test object to request the op_mode
- * to allocate an skb for sending an event, and initialize
- * the skb. It is assumed that the test object only fills the required
- * attributes.
- *
- * @event: handler used by the test object to request the op_mode to send
- * an event. The skb is an skb previously allocated by the
- * alloc_event callback.
- */
-struct iwl_test_ops {
- int (*send_cmd)(struct iwl_op_mode *op_modes,
- struct iwl_host_cmd *cmd);
- bool (*valid_hw_addr)(u32 addr);
- u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
-
- struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
- int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
- struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len);
- void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
-};
-
-struct iwl_test {
- struct iwl_trans *trans;
- struct iwl_test_ops *ops;
- struct iwl_test_trace trace;
- struct iwl_test_mem mem;
- bool notify;
-};
-
-void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
- struct iwl_test_ops *ops);
-
-void iwl_test_free(struct iwl_test *tst);
-
-int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
- void *data, int len);
-
-int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
-
-int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
- struct netlink_callback *cb);
-
-void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
-
-static inline void iwl_test_enable_notifications(struct iwl_test *tst,
- bool enable)
-{
- tst->notify = enable;
-}
-
-#endif
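
struct iwl_test_trace above keeps both cpu_addr and trace_addr because the DMA allocation is over-sized and the usable pointer is then aligned up to a 0x100 boundary (PTR_ALIGN in the removed iwl_test_trace_begin()). A standalone sketch of that alignment, in plain C with malloc() standing in for dma_alloc_coherent() (illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TRACE_ALIGN	0x100u
#define TRACE_PADD	0x2000u		/* mirrors TRACE_BUFF_PADD */

int main(void)
{
	size_t size = 0x20000;			/* requested trace size */
	size_t tsize = size + TRACE_PADD;	/* over-allocate so alignment always fits */
	uint8_t *cpu_addr = malloc(tsize);	/* stand-in for dma_alloc_coherent() */
	uint8_t *trace_addr;

	if (!cpu_addr)
		return 1;

	/* round the pointer up to the next 0x100 boundary, like PTR_ALIGN() */
	trace_addr = (uint8_t *)(((uintptr_t)cpu_addr + TRACE_ALIGN - 1) &
				 ~(uintptr_t)(TRACE_ALIGN - 1));

	printf("raw %p, aligned %p\n", (void *)cpu_addr, (void *)trace_addr);
	free(cpu_addr);
	return 0;
}
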
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
deleted file mode 100644
index 98f48a9afc98..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ /dev/null
@@ -1,309 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __IWL_TESTMODE_H__
-#define __IWL_TESTMODE_H__
-
-#include <linux/types.h>
-
-
-/*
- * Commands from user space to kernel space (IWL_TM_CMD_ID_APP2DEV_XX) and
- * from kernel space to user space (IWL_TM_CMD_ID_DEV2APP_XX).
- * The command ID is carried with IWL_TM_ATTR_COMMAND.
- *
- * @IWL_TM_CMD_APP2DEV_UCODE:
- * commands from user application to the uCode,
- * the actual uCode host command ID is carried with
- * IWL_TM_ATTR_UCODE_CMD_ID
- *
- * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
- * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
- * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
- * commands from user application to access registers
- *
- * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
- * @IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: load initial uCode image
- * @IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: perform calibration
- * @IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: load runtime uCode image
- * @IWL_TM_CMD_APP2DEV_GET_EEPROM: request EEPROM data
- * @IWL_TM_CMD_APP2DEV_FIXRATE_REQ: set fixed MCS
- * commands from user space for pure driver-level operations
- *
- * @IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
- * @IWL_TM_CMD_APP2DEV_END_TRACE:
- * @IWL_TM_CMD_APP2DEV_READ_TRACE:
- * commands from user space for uCode trace operations
- *
- * @IWL_TM_CMD_DEV2APP_SYNC_RSP:
- * commands from kernel space to carry the synchronous response
- * to user application
- * @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT:
- * commands from kernel space to multicast the spontaneous messages
- * to user application, or reply of host commands
- * @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
- * commands from kernel space to carry the eeprom response
- * to user application
- *
- * @IWL_TM_CMD_APP2DEV_OWNERSHIP:
- * commands from user application to change the ownership of the uCode;
- * if the application has the ownership, only host commands from
- * testmode will be delivered to the uCode. Default owner is the driver
- *
- * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake On Wireless LAN uCode image
- * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
- * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device
- * @IWL_TM_CMD_APP2DEV_GET_FW_INFO:
- * retrieve information of existing loaded uCode image
- *
- * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
- * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
- * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
- * Commands to read/write data from periphery or SRAM memory ranges.
- * For reading, a READ command is sent from user space and the data
- * is returned when the user calls a DUMP command.
- * For writing, only a WRITE command is used.
- * @IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
- * Command to enable/disable notifications (currently RX packets) from the
- * driver to userspace.
- */
-enum iwl_tm_cmd_t {
- IWL_TM_CMD_APP2DEV_UCODE = 1,
- IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2,
- IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3,
- IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4,
- IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
- IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
- IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
- IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW = 8,
- IWL_TM_CMD_APP2DEV_GET_EEPROM = 9,
- IWL_TM_CMD_APP2DEV_FIXRATE_REQ = 10,
- IWL_TM_CMD_APP2DEV_BEGIN_TRACE = 11,
- IWL_TM_CMD_APP2DEV_END_TRACE = 12,
- IWL_TM_CMD_APP2DEV_READ_TRACE = 13,
- IWL_TM_CMD_DEV2APP_SYNC_RSP = 14,
- IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
- IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
- IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
- RESERVED_18 = 18,
- RESERVED_19 = 19,
- RESERVED_20 = 20,
- RESERVED_21 = 21,
- IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
- IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
- IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
- IWL_TM_CMD_APP2DEV_GET_FW_INFO = 25,
- IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ = 26,
- IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP = 27,
- IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE = 28,
- IWL_TM_CMD_APP2DEV_NOTIFICATIONS = 29,
- IWL_TM_CMD_MAX = 30,
-};
-
-/*
- * Attribute fields used in testmode commands
- * See enum iwl_tm_cmd_t.
- *
- * @IWL_TM_ATTR_NOT_APPLICABLE:
- * The attribute is not applicable or invalid
- * @IWL_TM_ATTR_COMMAND:
- * From user space to kernel space:
- * the command is destined to the ucode, the driver, or a register;
- * From kernel space to user space:
- * the command either carries synchronous response,
- * or the spontaneous message multicast from the device;
- *
- * @IWL_TM_ATTR_UCODE_CMD_ID:
- * @IWL_TM_ATTR_UCODE_CMD_DATA:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE,
- * The mandatory fields are :
- * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID;
- * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload
- * to the ucode
- *
- * @IWL_TM_ATTR_REG_OFFSET:
- * @IWL_TM_ATTR_REG_VALUE8:
- * @IWL_TM_ATTR_REG_VALUE32:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX,
- * The mandatory fields are:
- * IWL_TM_ATTR_REG_OFFSET for the offset of the target register;
- * IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value
- *
- * @IWL_TM_ATTR_SYNC_RSP:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP,
- * The mandatory fields are:
- * IWL_TM_ATTR_SYNC_RSP for the data content responding to the user
- * application command
- *
- * @IWL_TM_ATTR_UCODE_RX_PKT:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
- * The mandatory fields are:
- * IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user
- * application
- *
- * @IWL_TM_ATTR_EEPROM:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM,
- * The mandatory fields are:
- * IWL_TM_ATTR_EEPROM for the data content responding to the user
- * application
- *
- * @IWL_TM_ATTR_TRACE_ADDR:
- * @IWL_TM_ATTR_TRACE_SIZE:
- * @IWL_TM_ATTR_TRACE_DUMP:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE,
- * The mandatory fields are:
- * IWL_TM_ATTR_TRACE_ADDR for the trace address
- * IWL_TM_ATTR_TRACE_SIZE for the trace buffer size
- * IWL_TM_ATTR_TRACE_DUMP for the trace dump
- *
- * @IWL_TM_ATTR_FIXRATE:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
- * The mandatory fields are:
- * IWL_TM_ATTR_FIXRATE for the fixed rate
- *
- * @IWL_TM_ATTR_UCODE_OWNER:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
- * The mandatory fields are:
- * IWL_TM_ATTR_UCODE_OWNER for the new owner
- *
- * @IWL_TM_ATTR_MEM_ADDR:
- * @IWL_TM_ATTR_BUFFER_SIZE:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ
- * or IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE.
- * The mandatory fields are:
- * IWL_TM_ATTR_MEM_ADDR for the address in SRAM/periphery to read/write
- * IWL_TM_ATTR_BUFFER_SIZE for the buffer size of data to read/write.
- *
- * @IWL_TM_ATTR_BUFFER_DUMP:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP,
- * IWL_TM_ATTR_BUFFER_DUMP is used for the data that was read.
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE,
- * this attribute contains the data to write.
- *
- * @IWL_TM_ATTR_FW_VERSION:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
- * IWL_TM_ATTR_FW_VERSION for the uCode version
- *
- * @IWL_TM_ATTR_DEVICE_ID:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
- * IWL_TM_ATTR_DEVICE_ID for the device ID information
- *
- * @IWL_TM_ATTR_FW_TYPE:
- * @IWL_TM_ATTR_FW_INST_SIZE:
- * @IWL_TM_ATTR_FW_DATA_SIZE:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_INFO,
- * The mandatory fields are:
- * IWL_TM_ATTR_FW_TYPE for the uCode type (INIT/RUNTIME/...)
- * IWL_TM_ATTR_FW_INST_SIZE for the size of instruction section
- * IWL_TM_ATTR_FW_DATA_SIZE for the size of data section
- *
- * @IWL_TM_ATTR_UCODE_CMD_SKB:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE this flag
- * indicates that the user wants to receive the response of the command
- * in a reply SKB. If it's not present, the response is not returned.
- * @IWL_TM_ATTR_ENABLE_NOTIFICATIONS:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_NOTIFICATIONS, this
- * flag enables (if present) or disables (if not) the forwarding
- * to userspace.
- */
-enum iwl_tm_attr_t {
- IWL_TM_ATTR_NOT_APPLICABLE = 0,
- IWL_TM_ATTR_COMMAND = 1,
- IWL_TM_ATTR_UCODE_CMD_ID = 2,
- IWL_TM_ATTR_UCODE_CMD_DATA = 3,
- IWL_TM_ATTR_REG_OFFSET = 4,
- IWL_TM_ATTR_REG_VALUE8 = 5,
- IWL_TM_ATTR_REG_VALUE32 = 6,
- IWL_TM_ATTR_SYNC_RSP = 7,
- IWL_TM_ATTR_UCODE_RX_PKT = 8,
- IWL_TM_ATTR_EEPROM = 9,
- IWL_TM_ATTR_TRACE_ADDR = 10,
- IWL_TM_ATTR_TRACE_SIZE = 11,
- IWL_TM_ATTR_TRACE_DUMP = 12,
- IWL_TM_ATTR_FIXRATE = 13,
- IWL_TM_ATTR_UCODE_OWNER = 14,
- IWL_TM_ATTR_MEM_ADDR = 15,
- IWL_TM_ATTR_BUFFER_SIZE = 16,
- IWL_TM_ATTR_BUFFER_DUMP = 17,
- IWL_TM_ATTR_FW_VERSION = 18,
- IWL_TM_ATTR_DEVICE_ID = 19,
- IWL_TM_ATTR_FW_TYPE = 20,
- IWL_TM_ATTR_FW_INST_SIZE = 21,
- IWL_TM_ATTR_FW_DATA_SIZE = 22,
- IWL_TM_ATTR_UCODE_CMD_SKB = 23,
- IWL_TM_ATTR_ENABLE_NOTIFICATION = 24,
- IWL_TM_ATTR_MAX = 25,
-};
-
-/* uCode trace buffer */
-#define TRACE_BUFF_SIZE_MAX 0x200000
-#define TRACE_BUFF_SIZE_MIN 0x20000
-#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
-#define TRACE_BUFF_PADD 0x2000
-
-/* Maximum data size of each dump packet */
-#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
-
-/* Address offset of data segment in SRAM */
-#define SRAM_DATA_SEG_OFFSET 0x800000
-
-#endif
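
The TRACE_BUFF_SIZE_* bounds above were enforced by the removed iwl_test_trace_begin(): a missing size attribute means the default, and anything outside the min/max range is rejected. A standalone sketch of that validation in plain C, treating a zero request as "attribute absent" for simplicity (illustration only, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define TRACE_BUFF_SIZE_MAX	0x200000
#define TRACE_BUFF_SIZE_MIN	0x20000
#define TRACE_BUFF_SIZE_DEF	TRACE_BUFF_SIZE_MIN

/* pick and validate a trace buffer size; returns false if out of range */
static bool trace_size_valid(unsigned int requested, unsigned int *out)
{
	unsigned int size = requested ? requested : TRACE_BUFF_SIZE_DEF;

	if (size < TRACE_BUFF_SIZE_MIN || size > TRACE_BUFF_SIZE_MAX)
		return false;
	*out = size;
	return true;
}

int main(void)
{
	unsigned int size;

	printf("0       -> %s\n", trace_size_valid(0, &size) ? "default used" : "rejected");
	printf("0x1000  -> %s\n", trace_size_valid(0x1000, &size) ? "accepted" : "rejected");
	printf("0x40000 -> %s\n", trace_size_valid(0x40000, &size) ? "accepted" : "rejected");
	return 0;
}
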
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 7a13790b5bfe..8d91422c5982 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -183,13 +183,12 @@ struct iwl_rx_packet {
 * @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
- * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
*/
enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
- CMD_ON_DEMAND = BIT(2),
+ CMD_SEND_IN_RFKILL = BIT(2),
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -427,8 +426,9 @@ struct iwl_trans_ops {
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans);
- void (*d3_suspend)(struct iwl_trans *trans);
- int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status);
+ void (*d3_suspend)(struct iwl_trans *trans, bool test);
+ int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
+ bool test);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -455,7 +455,7 @@ struct iwl_trans_ops {
int (*read_mem)(struct iwl_trans *trans, u32 addr,
void *buf, int dwords);
int (*write_mem)(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
+ const void *buf, int dwords);
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
@@ -587,17 +587,18 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
trans->state = IWL_TRANS_NO_FW;
}
-static inline void iwl_trans_d3_suspend(struct iwl_trans *trans)
+static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
might_sleep();
- trans->ops->d3_suspend(trans);
+ trans->ops->d3_suspend(trans, test);
}
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status)
+ enum iwl_d3_status *status,
+ bool test)
{
might_sleep();
- return trans->ops->d3_resume(trans, status);
+ return trans->ops->d3_resume(trans, status, test);
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
@@ -761,7 +762,7 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
}
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
+ const void *buf, int dwords)
{
return trans->ops->write_mem(trans, addr, buf, dwords);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 2acc44b40986..ff856e543ae8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -3,7 +3,7 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o bt-coex.o
-iwlmvm-y += led.o
+iwlmvm-y += led.o tt.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 810bfa5f6de0..dbd622a3929c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -174,7 +174,7 @@ static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaeaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xcc00ff28),
cpu_to_le32(0x0000aaaa),
@@ -202,6 +202,22 @@ static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
cpu_to_le32(0x00000000),
};
+/* single shared antenna */
+static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xC0004000),
+ cpu_to_le32(0xF0005000),
+ cpu_to_le32(0xC0004000),
+ cpu_to_le32(0xF0005000),
+};
+
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
struct iwl_bt_coex_cmd cmd = {
@@ -225,7 +241,10 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_REDUCED_TX_POWER |
BT_VALID_LUT);
- if (is_loose_coex())
+ if (mvm->cfg->bt_shared_single_ant)
+ memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup,
+ sizeof(iwl_single_shared_ant_lookup));
+ else if (is_loose_coex())
memcpy(&cmd.decision_lut, iwl_loose_lookup,
sizeof(iwl_tight_lookup));
else
@@ -351,6 +370,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
enum ieee80211_band band;
int ave_rssi;
+ lockdep_assert_held(&mvm->mutex);
if (vif->type != NL80211_IFTYPE_STATION)
return;
@@ -365,7 +385,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
smps_mode = IEEE80211_SMPS_AUTOMATIC;
if (band != IEEE80211_BAND_2GHZ) {
- ieee80211_request_smps(vif, smps_mode);
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
return;
}
@@ -380,7 +401,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
mvmvif->id, data->notif->bt_status,
data->notif->bt_traffic_load, smps_mode);
- ieee80211_request_smps(vif, smps_mode);
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
/* don't reduce the Tx power if in loose scheme */
if (is_loose_coex())
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 16bbdcc8627a..83da884cf303 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -63,6 +63,7 @@
#include <linux/etherdevice.h>
#include <linux/ip.h>
+#include <linux/fs.h>
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -133,7 +134,7 @@ struct wowlan_key_data {
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
struct iwl_wowlan_tkip_params_cmd *tkip;
bool error, use_rsc_tsc, use_tkip;
- int gtk_key_idx;
+ int wep_key_idx;
};
static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
@@ -187,8 +188,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
wkc.wep_key.key_offset = 0;
} else {
/* others start at 1 */
- data->gtk_key_idx++;
- wkc.wep_key.key_offset = data->gtk_key_idx;
+ data->wep_key_idx++;
+ wkc.wep_key.key_offset = data->wep_key_idx;
}
ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
@@ -315,8 +316,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
mvm->ptk_ivlen = key->iv_len;
mvm->ptk_icvlen = key->icv_len;
} else {
- data->gtk_key_idx++;
- key->hw_key_idx = data->gtk_key_idx;
+ /*
+ * firmware only supports TSC/RSC for a single key,
+ * so if there are multiple keep overwriting them
+ * with new ones -- this relies on mac80211 doing
+ * list_add_tail().
+ */
+ key->hw_key_idx = 1;
mvm->gtk_ivlen = key->iv_len;
mvm->gtk_icvlen = key->icv_len;
}
@@ -419,8 +425,7 @@ static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
return cpu_to_le16(be16_to_cpu((__force __be16)check));
}
-static void iwl_mvm_build_tcp_packet(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
+static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
struct cfg80211_wowlan_tcp *tcp,
void *_pkt, u8 *mask,
__le16 *pseudo_hdr_csum,
@@ -566,21 +571,21 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
/* SYN (TX) */
iwl_mvm_build_tcp_packet(
- mvm, vif, tcp, cfg->syn_tx.data, NULL,
+ vif, tcp, cfg->syn_tx.data, NULL,
&cfg->syn_tx.info.tcp_pseudo_header_checksum,
MVM_TCP_TX_SYN);
cfg->syn_tx.info.tcp_payload_length = 0;
/* SYN/ACK (RX) */
iwl_mvm_build_tcp_packet(
- mvm, vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
+ vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
&cfg->synack_rx.info.tcp_pseudo_header_checksum,
MVM_TCP_RX_SYNACK);
cfg->synack_rx.info.tcp_payload_length = 0;
/* KEEPALIVE/ACK (TX) */
iwl_mvm_build_tcp_packet(
- mvm, vif, tcp, cfg->keepalive_tx.data, NULL,
+ vif, tcp, cfg->keepalive_tx.data, NULL,
&cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
MVM_TCP_TX_DATA);
cfg->keepalive_tx.info.tcp_payload_length =
@@ -604,7 +609,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
/* ACK (RX) */
iwl_mvm_build_tcp_packet(
- mvm, vif, tcp, cfg->keepalive_ack_rx.data,
+ vif, tcp, cfg->keepalive_ack_rx.data,
cfg->keepalive_ack_rx.rx_mask,
&cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
MVM_TCP_RX_ACK);
@@ -612,7 +617,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
/* WAKEUP (RX) */
iwl_mvm_build_tcp_packet(
- mvm, vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
+ vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
&cfg->wake_rx.info.tcp_pseudo_header_checksum,
MVM_TCP_RX_WAKE);
cfg->wake_rx.info.tcp_payload_length =
@@ -620,7 +625,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
/* FIN */
iwl_mvm_build_tcp_packet(
- mvm, vif, tcp, cfg->fin_tx.data, NULL,
+ vif, tcp, cfg->fin_tx.data, NULL,
&cfg->fin_tx.info.tcp_pseudo_header_checksum,
MVM_TCP_TX_FIN);
cfg->fin_tx.info.tcp_payload_length = 0;
@@ -756,7 +761,9 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
-int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan,
+ bool test)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_d3_iter_data suspend_iter_data = {
@@ -769,7 +776,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
- struct iwl_d3_manager_config d3_cfg_cmd = {
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {
/*
* Program the minimum sleep time to 10 seconds, as many
* platforms have issues processing a wakeup signal while
@@ -777,17 +784,30 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
*/
.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
};
+ struct iwl_host_cmd d3_cfg_cmd = {
+ .id = D3_CONFIG_CMD,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ .data[0] = &d3_cfg_cmd_data,
+ .len[0] = sizeof(d3_cfg_cmd_data),
+ };
struct wowlan_key_data key_data = {
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
};
int ret, i;
+ int len __maybe_unused;
u16 seq;
u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
- if (WARN_ON(!wowlan))
+ if (!wowlan) {
+ /*
+ * mac80211 shouldn't get here, but for D3 test
+ * it doesn't warrant a warning
+ */
+ WARN_ON(!test);
return -EINVAL;
+ }
key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
if (!key_data.rsc_tsc)
@@ -1007,15 +1027,37 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
if (ret)
goto out;
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ if (ret)
+ goto out;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->d3_wake_sysassert)
+ d3_cfg_cmd_data.wakeup_flags |=
+ cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
+#endif
+
/* must be last -- this switches firmware state */
- ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC,
- sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
goto out;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ len = le32_to_cpu(d3_cfg_cmd.resp_pkt->len_n_flags) &
+ FH_RSCSR_FRAME_SIZE_MSK;
+ if (len >= sizeof(u32) * 2) {
+ mvm->d3_test_pme_ptr =
+ le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
+ } else if (test) {
+ /* in test mode we require the pointer */
+ ret = -EIO;
+ goto out;
+ }
+#endif
+ iwl_free_resp(&d3_cfg_cmd);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
- iwl_trans_d3_suspend(mvm->trans);
+ iwl_trans_d3_suspend(mvm->trans, test);
out:
mvm->aux_sta.sta_id = old_aux_sta_id;
mvm_ap_sta->sta_id = old_ap_sta_id;
@@ -1030,6 +1072,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return ret;
}
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ return __iwl_mvm_suspend(hw, wowlan, false);
+}
+
static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -1214,9 +1261,28 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
iwl_free_resp(&cmd);
}
-int iwl_mvm_resume(struct ieee80211_hw *hw)
+static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
+{
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+ u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+
+ if (!mvm->store_d3_resume_sram)
+ return;
+
+ if (!mvm->d3_resume_sram) {
+ mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
+ if (!mvm->d3_resume_sram)
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
+#endif
+}
+
+static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_d3_iter_data resume_iter_data = {
.mvm = mvm,
};
@@ -1236,7 +1302,7 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
vif = resume_iter_data.vif;
- ret = iwl_trans_d3_resume(mvm->trans, &d3_status);
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
if (ret)
goto out_unlock;
@@ -1245,12 +1311,15 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
goto out_unlock;
}
+ /* query SRAM first in case we want event logging */
+ iwl_mvm_read_d3_sram(mvm);
+
iwl_mvm_query_wakeup_reasons(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
- if (vif)
+ if (!test && vif)
ieee80211_resume_disconnect(vif);
/* return 1 to reconfigure the device */
@@ -1258,9 +1327,106 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
return 1;
}
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ return __iwl_mvm_resume(mvm, false);
+}
+
void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
device_set_wakeup_enable(mvm->trans->dev, enabled);
}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
+{
+ struct iwl_mvm *mvm = inode->i_private;
+ int err;
+
+ if (mvm->d3_test_active)
+ return -EBUSY;
+
+ file->private_data = inode->i_private;
+
+ ieee80211_stop_queues(mvm->hw);
+ synchronize_net();
+
+ /* start pseudo D3 */
+ rtnl_lock();
+ err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+ rtnl_unlock();
+ if (err > 0)
+ err = -EINVAL;
+ if (err) {
+ ieee80211_wake_queues(mvm->hw);
+ return err;
+ }
+ mvm->d3_test_active = true;
+ return 0;
+}
+
+static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ u32 pme_asserted;
+
+ while (true) {
+ pme_asserted = iwl_trans_read_mem32(mvm->trans,
+ mvm->d3_test_pme_ptr);
+ if (pme_asserted)
+ break;
+ if (msleep_interruptible(100))
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_connection_loss(vif);
+}
+
+static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
+{
+ struct iwl_mvm *mvm = inode->i_private;
+ int remaining_time = 10;
+
+ mvm->d3_test_active = false;
+ __iwl_mvm_resume(mvm, true);
+ iwl_abort_notification_waits(&mvm->notif_wait);
+ ieee80211_restart_hw(mvm->hw);
+
+ /* wait for restart and disconnect all interfaces */
+ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ remaining_time > 0) {
+ remaining_time--;
+ msleep(1000);
+ }
+
+ if (remaining_time == 0)
+ IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_test_disconn_work_iter, NULL);
+
+ ieee80211_wake_queues(mvm->hw);
+
+ return 0;
+}
+
+const struct file_operations iwl_dbgfs_d3_test_ops = {
+ .llseek = no_llseek,
+ .open = iwl_mvm_d3_test_open,
+ .read = iwl_mvm_d3_test_read,
+ .release = iwl_mvm_d3_test_release,
+};
+#endif
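
The new iwl_mvm_d3_test_release() above waits for the firmware restart with a bounded poll: sleep one second per iteration and give up after ten attempts rather than blocking forever. A standalone sketch of that pattern in plain C, with a dummy condition standing in for the IWL_MVM_STATUS_IN_HW_RESTART bit (illustration only, not driver code):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* dummy condition: pretend the restart completes after ~3 seconds */
static bool restart_in_progress(int elapsed)
{
	return elapsed < 3;
}

int main(void)
{
	int remaining_time = 10;	/* maximum seconds to wait */
	int elapsed = 0;

	while (restart_in_progress(elapsed) && remaining_time > 0) {
		remaining_time--;
		elapsed++;
		sleep(1);		/* stand-in for msleep(1000) */
	}

	if (remaining_time == 0)
		fprintf(stderr, "timed out waiting for restart\n");
	else
		printf("restart finished after %d s\n", elapsed);
	return 0;
}
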
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 2053dccefcd6..c24a744910ac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -145,15 +145,18 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
char *buf;
u8 *ptr;
+ if (!mvm->ucode_loaded)
+ return -EINVAL;
+
/* default is to dump the entire data segment */
if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) {
- mvm->dbgfs_sram_offset = 0x800000;
- if (!mvm->ucode_loaded)
- return -EINVAL;
img = &mvm->fw->img[mvm->cur_ucode];
- mvm->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ } else {
+ ofs = mvm->dbgfs_sram_offset;
+ len = mvm->dbgfs_sram_len;
}
- len = mvm->dbgfs_sram_len;
bufsz = len * 4 + 256;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -167,12 +170,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
}
pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len);
- pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
- mvm->dbgfs_sram_offset);
+ pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", ofs);
- iwl_trans_read_mem_bytes(mvm->trans,
- mvm->dbgfs_sram_offset,
- ptr, len);
+ iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
for (ofs = 0; ofs < len; ofs += 16) {
pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
@@ -300,6 +300,168 @@ static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
return count;
}
+static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_dbgfs_pm_mask param, int val)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
+
+ dbgfs_pm->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_PM_KEEP_ALIVE: {
+ struct ieee80211_hw *hw = mvm->hw;
+ int dtimper = hw->conf.ps_dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+ IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
+ if (val * MSEC_PER_SEC < 3 * dtimper_msec) {
+ IWL_WARN(mvm,
+ "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+ val * MSEC_PER_SEC, 3 * dtimper_msec);
+ }
+ dbgfs_pm->keep_alive_seconds = val;
+ break;
+ }
+ case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
+ IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
+ val ? "enabled" : "disabled");
+ dbgfs_pm->skip_over_dtim = val;
+ break;
+ case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
+ IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+ dbgfs_pm->skip_dtim_periods = val;
+ break;
+ case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+ dbgfs_pm->rx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+ dbgfs_pm->tx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
+ IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
+		dbgfs_pm->disable_power_off = val;
+		break;
+ case MVM_DEBUGFS_PM_LPRX_ENA:
+ IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
+ dbgfs_pm->lprx_ena = val;
+ break;
+ case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
+ IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
+ dbgfs_pm->lprx_rssi_threshold = val;
+ break;
+ }
+}
+
+static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->dbgfs_data;
+ enum iwl_dbgfs_pm_mask param;
+ char buf[32] = {};
+ int val;
+ int ret;
+
+	if (copy_from_user(buf, user_buf, min(count, sizeof(buf) - 1)))
+ return -EFAULT;
+
+ if (!strncmp("keep_alive=", buf, 11)) {
+ if (sscanf(buf + 11, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+ } else if (!strncmp("skip_over_dtim=", buf, 15)) {
+ if (sscanf(buf + 15, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+ } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+ } else if (!strncmp("rx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+ } else if (!strncmp("tx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+ } else if (!strncmp("disable_power_off=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
+ } else if (!strncmp("lprx=", buf, 5)) {
+ if (sscanf(buf + 5, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_ENA;
+ } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
+ if (sscanf(buf + 20, "%d", &val) != 1)
+ return -EINVAL;
+ if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
+ POWER_LPRX_RSSI_THRESHOLD_MIN)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_pm(mvm, vif, param, val);
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->dbgfs_data;
+ struct iwl_powertable_cmd cmd = {};
+ char buf[256];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+ 0 : 1);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ le32_to_cpu(cmd.skip_dtim_periods));
+ pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+ iwlmvm_mod_params.power_scheme);
+ pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+ le16_to_cpu(cmd.flags));
+ pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+ cmd.keep_alive_seconds);
+
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
+ 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "lprx_rssi_threshold = %d\n",
+ le32_to_cpu(cmd.lprx_rssi_threshold));
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -481,6 +643,255 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
return count;
}
+static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
+ enum iwl_dbgfs_bf_mask param, int value)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+ dbgfs_bf->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_BF_ENERGY_DELTA:
+ dbgfs_bf->bf_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
+ dbgfs_bf->bf_roaming_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_STATE:
+ dbgfs_bf->bf_roaming_state = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMPERATURE_DELTA:
+ dbgfs_bf->bf_temperature_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
+ dbgfs_bf->bf_enable_beacon_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_DEBUG_FLAG:
+ dbgfs_bf->bf_debug_flag = value;
+ break;
+ case MVM_DEBUGFS_BF_ESCAPE_TIMER:
+ dbgfs_bf->bf_escape_timer = value;
+ break;
+ case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
+ dbgfs_bf->ba_enable_beacon_abort = value;
+ break;
+ case MVM_DEBUGFS_BA_ESCAPE_TIMER:
+ dbgfs_bf->ba_escape_timer = value;
+ break;
+ }
+}
+
+static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->dbgfs_data;
+ enum iwl_dbgfs_bf_mask param;
+ char buf[256];
+ int buf_size;
+ int value;
+ int ret = 0;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (!strncmp("bf_energy_delta=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_state=", buf, 17)) {
+ if (sscanf(buf+17, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_STATE_MIN ||
+ value > IWL_BF_ROAMING_STATE_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_STATE;
+ } else if (!strncmp("bf_temperature_delta=", buf, 21)) {
+ if (sscanf(buf+21, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMPERATURE_DELTA_MIN ||
+ value > IWL_BF_TEMPERATURE_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA;
+ } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+ } else if (!strncmp("bf_debug_flag=", buf, 14)) {
+ if (sscanf(buf+14, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+ } else if (!strncmp("bf_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ESCAPE_TIMER_MIN ||
+ value > IWL_BF_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+ } else if (!strncmp("ba_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BA_ESCAPE_TIMER_MIN ||
+ value > IWL_BA_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+ } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+ if (sscanf(buf+23, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_bf(vif, param, value);
+ if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ } else {
+ if (mvmvif->bf_enabled)
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ else
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_beacon_filter_cmd cmd = {
+ .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT,
+ .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT,
+ .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT,
+ .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT,
+ .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT,
+ .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,
+ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),
+ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT),
+ .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT,
+ };
+
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+ if (mvmvif->bf_enabled)
+ cmd.bf_enable_beacon_filter = 1;
+ else
+ cmd.bf_enable_beacon_filter = 0;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
+ cmd.bf_energy_delta);
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
+ cmd.bf_roaming_energy_delta);
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
+ cmd.bf_roaming_state);
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n",
+ cmd.bf_temperature_delta);
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
+ cmd.bf_enable_beacon_filter);
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
+ cmd.bf_debug_flag);
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
+			 le32_to_cpu(cmd.bf_escape_timer));
+	pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
+			 le32_to_cpu(cmd.ba_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
+ cmd.ba_enable_beacon_abort);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[8] = {};
+ int store;
+
+ if (copy_from_user(buf, user_buf, sizeof(buf)))
+ return -EFAULT;
+
+ if (sscanf(buf, "%d", &store) != 1)
+ return -EINVAL;
+
+ mvm->store_d3_resume_sram = store;
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ const struct fw_img *img;
+ int ofs, len, pos = 0;
+ size_t bufsz, ret;
+ char *buf;
+ u8 *ptr = mvm->d3_resume_sram;
+
+ img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+ len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ bufsz = len * 4 + 256;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n",
+ mvm->store_d3_resume_sram ? "en" : "dis");
+
+ if (ptr) {
+ for (ofs = 0; ofs < len; ofs += 16) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
+ bufsz - pos, false);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "(no data captured)\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+ kfree(buf);
+
+ return ret;
+}
+#endif
+
#define MVM_DEBUGFS_READ_FILE_OPS(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
@@ -524,9 +935,14 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
+#ifdef CONFIG_PM_SLEEP
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
+#endif
/* Interface specific debugfs entries */
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params);
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
@@ -542,6 +958,13 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+#ifdef CONFIG_PM_SLEEP
+ MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
+ if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
+ mvm->debugfs_dir, &mvm->d3_wake_sysassert))
+ goto err;
+#endif
/*
* Create a symlink with mac80211. It will be removed when mac80211
@@ -565,7 +988,11 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
char buf[100];
- if (!dbgfs_dir)
+ /*
+	 * Check if the debugfs directory already exists before creating it.
+	 * This may happen when, for example, resetting hw or during suspend-resume.
+ */
+ if (!dbgfs_dir || mvmvif->dbgfs_dir)
return;
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
@@ -577,9 +1004,19 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p)
+ MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
+ S_IRUSR);
+
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
S_IRUSR);
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ mvmvif == mvm->bf_allowed_vif)
+ MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+
/*
* Create symlink for convenience pointing to interface specific
* debugfs entries for the driver. For example, under
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 51e015d1dfb2..6f8b2c16ae17 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -75,13 +75,15 @@ enum iwl_d3_wakeup_flags {
* struct iwl_d3_manager_config - D3 manager configuration command
* @min_sleep_time: minimum sleep time (in usec)
* @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
+ * @wakeup_host_timer: force wakeup after this many seconds
*
* The structure is used for the D3_CONFIG_CMD command.
*/
struct iwl_d3_manager_config {
__le32 min_sleep_time;
__le32 wakeup_flags;
-} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */
+ __le32 wakeup_host_timer;
+} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index d68640ea41d4..98b1feb43d38 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -71,7 +71,13 @@
#define MAC_INDEX_MIN_DRIVER 0
#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
-#define AC_NUM 4 /* Number of access categories */
+enum iwl_ac {
+ AC_BK,
+ AC_BE,
+ AC_VI,
+ AC_VO,
+ AC_NUM,
+};
/**
* enum iwl_mac_protection_flags - MAC context flags
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 81fe45f46be7..a6da359a80c3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -66,6 +66,11 @@
/* Power Management Commands, Responses, Notifications */
+/* Radio LP RX Energy Threshold measured in dBm */
+#define POWER_LPRX_RSSI_THRESHOLD 75
+#define POWER_LPRX_RSSI_THRESHOLD_MAX 94
+#define POWER_LPRX_RSSI_THRESHOLD_MIN 30
+
/**
* enum iwl_scan_flags - masks for power table command flags
* @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
@@ -101,20 +106,107 @@ enum iwl_power_flags {
* @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
* PSM transition - legacy PM
* @sleep_interval: not in use
- * @keep_alive_beacons: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ *	one DTIM, this value needs to be set to 2 (DTIM periods).
* @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
* Default: 80dbm
*/
struct iwl_powertable_cmd {
- /* PM_POWER_TABLE_CMD_API_S_VER_5 */
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
__le16 flags;
u8 keep_alive_seconds;
u8 debug_flags;
__le32 rx_data_timeout;
__le32 tx_data_timeout;
__le32 sleep_interval[IWL_POWER_VEC_SIZE];
- __le32 keep_alive_beacons;
+ __le32 skip_dtim_periods;
__le32 lprx_rssi_threshold;
} __packed;
+/**
+ * struct iwl_beacon_filter_cmd
+ * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @id_and_color: MAC context identifier
+ * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
+ * to driver if delta in Energy values calculated for this and last
+ * passed beacon is greater than this threshold. Zero value means that
+ * the Energy change is ignored for beacon filtering, and beacon will
+ * not be forced to be sent to driver regardless of this delta. Typical
+ * energy delta 5dB.
+ * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
+ * Send beacon to driver if delta in Energy values calculated for this
+ * and last passed beacon is greater than this threshold. Zero value
+ * means that the Energy change is ignored for beacon filtering while in
+ * Roaming state, typical energy delta 1dB.
+ * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
+ * calculated for current beacon is less than the threshold, use
+ * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
+ * Threshold. Typical energy threshold is -72dBm.
+ * @bf_temperature_delta: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ *      threshold. Zero value means that the temperature change is ignored for
+ *      beacon filtering; beacons will not be forced to be sent to driver
+ *      regardless of whether the temperature has changed.
+ * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
+ * @bf_escape_timer: Send beacons to driver if no beacons were passed
+ * for a specific period of time. Units: Beacons.
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
+ *      for a longer period of time than this escape-timeout. Units: Beacons.
+ * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
+ */
+struct iwl_beacon_filter_cmd {
+ u8 bf_energy_delta;
+ u8 bf_roaming_energy_delta;
+ u8 bf_roaming_state;
+ u8 bf_temperature_delta;
+ u8 bf_enable_beacon_filter;
+ u8 bf_debug_flag;
+ __le16 reserved1;
+ __le32 bf_escape_timer;
+ __le32 ba_escape_timer;
+ u8 ba_enable_beacon_abort;
+ u8 reserved2[3];
+} __packed;
+
+/* Beacon filtering and beacon abort */
+#define IWL_BF_ENERGY_DELTA_DEFAULT 5
+#define IWL_BF_ENERGY_DELTA_MAX 255
+#define IWL_BF_ENERGY_DELTA_MIN 0
+
+#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
+#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
+
+#define IWL_BF_ROAMING_STATE_DEFAULT 72
+#define IWL_BF_ROAMING_STATE_MAX 255
+#define IWL_BF_ROAMING_STATE_MIN 0
+
+#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5
+#define IWL_BF_TEMPERATURE_DELTA_MAX 255
+#define IWL_BF_TEMPERATURE_DELTA_MIN 0
+
+#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
+
+#define IWL_BF_DEBUG_FLAG_DEFAULT 0
+
+#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_MAX 1024
+#define IWL_BF_ESCAPE_TIMER_MIN 0
+
+#define IWL_BA_ESCAPE_TIMER_DEFAULT 3
+#define IWL_BA_ESCAPE_TIMER_MAX 1024
+#define IWL_BA_ESCAPE_TIMER_MIN 0
+
+#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
+
+#define IWL_BF_CMD_CONFIG_DEFAULTS \
+ .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, \
+ .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \
+ .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, \
+ .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, \
+ .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, \
+ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
+ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
+
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index b60d14151721..365095a0c3b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -69,7 +69,6 @@
/* Scan Commands, Responses, Notifications */
/* Masks for iwl_scan_channel.type flags */
-#define SCAN_CHANNEL_TYPE_PASSIVE 0
#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
#define SCAN_CHANNEL_NARROW_BAND BIT(22)
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 007a93b25bd7..700cce731770 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -134,6 +134,7 @@ enum iwl_tx_flags {
#define TX_CMD_SEC_WEP 0x01
#define TX_CMD_SEC_CCM 0x02
#define TX_CMD_SEC_TKIP 0x03
+#define TX_CMD_SEC_MSK 0x07
#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
#define TX_CMD_SEC_KEY128 0x08
@@ -227,10 +228,11 @@ struct iwl_tx_cmd {
__le16 len;
__le16 next_frame_len;
__le32 tx_flags;
- /* DRAM_SCRATCH_API_U_VER_1 */
- u8 try_cnt;
- u8 btkill_cnt;
- __le16 reserved;
+ struct {
+ u8 try_cnt;
+ u8 btkill_cnt;
+ __le16 reserved;
+ } scratch; /* DRAM_SCRATCH_API_U_VER_1 */
__le32 rate_n_flags;
u8 sta_id;
u8 sec_ctl;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index c6384555aab4..cbfb3beae783 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -139,6 +139,9 @@ enum {
/* Power */
POWER_TABLE_CMD = 0x77,
+	/* Thermal Throttling */
+ REPLY_THERMAL_MNG_BACKOFF = 0x7e,
+
/* Scanning */
SCAN_REQUEST_CMD = 0x80,
SCAN_ABORT_CMD = 0x81,
@@ -161,6 +164,8 @@ enum {
CARD_STATE_CMD = 0xa0,
CARD_STATE_NOTIFICATION = 0xa1,
+ MISSED_BEACONS_NOTIFICATION = 0xa2,
+
REPLY_RX_PHY_CMD = 0xc0,
REPLY_RX_MPDU_CMD = 0xc1,
BA_NOTIF = 0xc5,
@@ -170,6 +175,8 @@ enum {
BT_COEX_PROT_ENV = 0xcd,
BT_PROFILE_NOTIFICATION = 0xce,
+ REPLY_BEACON_FILTERING_CMD = 0xd2,
+
REPLY_DEBUG_CMD = 0xf0,
DEBUG_LOG_MSG = 0xf7,
@@ -938,6 +945,24 @@ struct iwl_card_state_notif {
} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
/**
+ * struct iwl_missed_beacons_notif - information on missed beacons
+ * ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
+ * @mac_id: interface ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ * beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @num_expected_beacons: number of expected beacons
+ * @num_recvd_beacons: number of received beacons
+ */
+struct iwl_missed_beacons_notif {
+ __le32 mac_id;
+ __le32 consec_missed_beacons_since_last_rx;
+ __le32 consec_missed_beacons;
+ __le32 num_expected_beacons;
+ __le32 num_recvd_beacons;
+} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
+
+/**
* struct iwl_set_calib_default_cmd - set default value for calibration.
* ( SET_CALIB_DEFAULT_CMD = 0x8e )
* @calib_index: the calibration to set value for
@@ -975,4 +1000,212 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
+struct mvm_statistics_dbg {
+ __le32 burst_check;
+ __le32 burst_count;
+ __le32 wait_for_silence_timeout_cnt;
+ __le32 reserved[3];
+} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
+
+struct mvm_statistics_div {
+ __le32 tx_on_a;
+ __le32 tx_on_b;
+ __le32 exec_time;
+ __le32 probe_time;
+ __le32 rssi_ant;
+ __le32 reserved2;
+} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
+
+struct mvm_statistics_general_common {
+ __le32 temperature; /* radio temperature */
+ __le32 temperature_m; /* radio voltage */
+ struct mvm_statistics_dbg dbg;
+ __le32 sleep_time;
+ __le32 slots_out;
+ __le32 slots_idle;
+ __le32 ttl_timestamp;
+ struct mvm_statistics_div div;
+ __le32 rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
+
+struct mvm_statistics_rx_non_phy {
+ __le32 bogus_cts; /* CTS received when not expecting CTS */
+ __le32 bogus_ack; /* ACK received when not expecting ACK */
+ __le32 non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ __le32 filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ __le32 non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+ __le32 channel_beacons; /* beacons with our bss id and in our
+ * serving channel */
+ __le32 num_missed_bcon; /* number of missed beacons */
+ __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
+ * ADC was in saturation */
+ __le32 ina_detection_search_time;/* total time (in 0.8us) searched
+ * for INA */
+ __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
+ __le32 interference_data_flag; /* flag for interference data
+ * availability. 1 when data is
+ * available. */
+ __le32 channel_load; /* counts RX Enable time in uSec */
+ __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
+ * and CCK) counter */
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 beacon_rssi_c;
+ __le32 beacon_energy_a;
+ __le32 beacon_energy_b;
+ __le32 beacon_energy_c;
+ __le32 num_bt_kills;
+ __le32 mac_id;
+ __le32 directed_data_mpdu;
+} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
+
+struct mvm_statistics_rx_phy {
+ __le32 ina_cnt;
+ __le32 fina_cnt;
+ __le32 plcp_err;
+ __le32 crc32_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 false_alarm_cnt;
+ __le32 fina_sync_err_cnt;
+ __le32 sfd_timeout;
+ __le32 fina_timeout;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_limit_overrun;
+ __le32 sent_ack_cnt;
+ __le32 sent_cts_cnt;
+ __le32 sent_ba_rsp_cnt;
+ __le32 dsp_self_kill;
+ __le32 mh_format_err;
+ __le32 re_acq_main_rssi_sum;
+ __le32 reserved;
+} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
+
+struct mvm_statistics_rx_ht_phy {
+ __le32 plcp_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 crc32_err;
+ __le32 mh_format_err;
+ __le32 agg_crc32_good;
+ __le32 agg_mpdu_cnt;
+ __le32 agg_cnt;
+ __le32 unsupport_mcs;
+} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
+
+#define MAX_CHAINS 3
+
+struct mvm_statistics_tx_non_phy_agg {
+ __le32 ba_timeout;
+ __le32 ba_reschedule_frames;
+ __le32 scd_query_agg_frame_cnt;
+ __le32 scd_query_no_agg;
+ __le32 scd_query_agg;
+ __le32 scd_query_mismatch;
+ __le32 frame_not_ready;
+ __le32 underrun;
+ __le32 bt_prio_kill;
+ __le32 rx_ba_rsp_cnt;
+ __s8 txpower[MAX_CHAINS];
+ __s8 reserved;
+ __le32 reserved2;
+} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
+
+struct mvm_statistics_tx_channel_width {
+ __le32 ext_cca_narrow_ch20[1];
+ __le32 ext_cca_narrow_ch40[2];
+ __le32 ext_cca_narrow_ch80[3];
+ __le32 ext_cca_narrow_ch160[4];
+ __le32 last_tx_ch_width_indx;
+ __le32 rx_detected_per_ch_width[4];
+ __le32 success_per_ch_width[4];
+ __le32 fail_per_ch_width[4];
+}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
+
+struct mvm_statistics_tx {
+ __le32 preamble_cnt;
+ __le32 rx_detected_cnt;
+ __le32 bt_prio_defer_cnt;
+ __le32 bt_prio_kill_cnt;
+ __le32 few_bytes_cnt;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 expected_ack_cnt;
+ __le32 actual_ack_cnt;
+ __le32 dump_msdu_cnt;
+ __le32 burst_abort_next_frame_mismatch_cnt;
+ __le32 burst_abort_missing_next_frame_cnt;
+ __le32 cts_timeout_collision;
+ __le32 ack_or_ba_timeout_collision;
+ struct mvm_statistics_tx_non_phy_agg agg;
+ struct mvm_statistics_tx_channel_width channel_width;
+} __packed; /* STATISTICS_TX_API_S_VER_4 */
+
+
+struct mvm_statistics_bt_activity {
+ __le32 hi_priority_tx_req_cnt;
+ __le32 hi_priority_tx_denied_cnt;
+ __le32 lo_priority_tx_req_cnt;
+ __le32 lo_priority_tx_denied_cnt;
+ __le32 hi_priority_rx_req_cnt;
+ __le32 hi_priority_rx_denied_cnt;
+ __le32 lo_priority_rx_req_cnt;
+ __le32 lo_priority_rx_denied_cnt;
+} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
+
+struct mvm_statistics_general {
+ struct mvm_statistics_general_common common;
+ __le32 beacon_filtered;
+ __le32 missed_beacons;
+ __s8 beacon_filter_everage_energy;
+ __s8 beacon_filter_reason;
+ __s8 beacon_filter_current_energy;
+ __s8 beacon_filter_reserved;
+ __le32 beacon_filter_delta_time;
+ struct mvm_statistics_bt_activity bt_activity;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
+
+struct mvm_statistics_rx {
+ struct mvm_statistics_rx_phy ofdm;
+ struct mvm_statistics_rx_phy cck;
+ struct mvm_statistics_rx_non_phy general;
+ struct mvm_statistics_rx_ht_phy ofdm_ht;
+} __packed; /* STATISTICS_RX_API_S_VER_3 */
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans. uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+
+struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
+ __le32 flag;
+ struct mvm_statistics_rx rx;
+ struct mvm_statistics_tx tx;
+ struct mvm_statistics_general general;
+} __packed;
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index e18c92dd60ec..cd7c0032cc58 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -326,6 +326,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
WARN_ON(ret);
+ /*
+	 * Abort after reading the NVM in case RF Kill is on; we will complete
+	 * the init sequence later when RF Kill is switched off.
+ */
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ IWL_DEBUG_RF_KILL(mvm,
+ "jump over all phy activities due to RF kill\n");
+ iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+ return 1;
+ }
+
/* Send TX valid antennas before triggering calibrations */
ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
if (ret)
@@ -388,6 +399,8 @@ out:
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
lockdep_assert_held(&mvm->mutex);
@@ -400,8 +413,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_run_init_mvm_ucode(mvm, false);
if (ret && !iwlmvm_mod_params.init_dbg) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+ /* this can't happen */
+ if (WARN_ON(ret > 0))
+ ret = -ERFKILL;
goto error;
}
+		/* should stop & start HW since the INIT image was just loaded */
+ iwl_trans_stop_hw(mvm->trans, false);
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
}
if (iwlmvm_mod_params.init_dbg)
@@ -443,8 +464,22 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
- IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+ /* Add all the PHY contexts */
+ chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ /*
+ * The channel used here isn't relevant as it's
+ * going to be overwritten in the other flows.
+ * For now use the first channel we have.
+ */
+ ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
+ &chandef, 1, 1);
+ if (ret)
+ goto error;
+ }
+ IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
iwl_trans_stop_device(mvm->trans);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index b2cc3d98e0f7..94aae9c8562c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -193,14 +193,11 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- u32 qmask, ac;
+ u32 qmask = 0, ac;
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
- qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
- BIT(vif->cab_queue) : 0;
-
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
qmask |= BIT(vif->hw_queue[ac]);
@@ -227,7 +224,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
.found_vif = false,
};
u32 ac;
- int ret;
+ int ret, i;
/*
* Allocate a MAC ID and a TSF for this MAC, along with the queues
@@ -335,6 +332,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
+ mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
+
return 0;
exit_fail:
@@ -362,7 +362,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
break;
case NL80211_IFTYPE_AP:
iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
- IWL_MVM_TX_FIFO_VO);
+ IWL_MVM_TX_FIFO_MCAST);
/* fall through */
default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -550,6 +550,10 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
}
+ /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+ if (vif->type == NL80211_IFTYPE_AP)
+ cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
@@ -861,6 +865,30 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
return ret;
}
+struct iwl_mvm_mac_ap_iterator_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+ u32 beacon_device_ts;
+ u16 beacon_int;
+};
+
+/* Find the beacon_device_ts and beacon_int for a managed interface */
+static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_ap_iterator_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ return;
+
+	/* Station client has priority over P2P client */
+ if (vif->p2p && data->beacon_device_ts)
+ return;
+
+ data->beacon_device_ts = vif->bss_conf.sync_device_ts;
+ data->beacon_int = vif->bss_conf.beacon_int;
+}
+
/*
* Fill the specific data for mac context of type AP of P2P GO
*/
@@ -870,6 +898,11 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
bool add)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_ap_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .beacon_device_ts = 0
+ };
ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_ap->bi_reciprocal =
@@ -883,16 +916,33 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
/*
- * Only read the system time when the MAC is being added, when we
+ * Only set the beacon time when the MAC is being added, when we
* just modify the MAC then we should keep the time -- the firmware
* can otherwise have a "jumping" TBTT.
*/
- if (add)
- mvmvif->ap_beacon_time =
- iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ if (add) {
+ /*
+ * If there is a station/P2P client interface which is
+ * associated, set the AP's TBTT far enough from the station's
+ * TBTT. Otherwise, set it to the current system time
+ */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_ap_iterator, &data);
+
+ if (data.beacon_device_ts) {
+ u32 rand = (prandom_u32() % (80 - 20)) + 20;
+ mvmvif->ap_beacon_time = data.beacon_device_ts +
+ ieee80211_tu_to_usec(data.beacon_int * rand /
+ 100);
+ } else {
+ mvmvif->ap_beacon_time =
+ iwl_read_prph(mvm->trans,
+ DEVICE_SYSTEM_TIME_REG);
+ }
+ }
ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
-
ctxt_ap->beacon_tsf = 0; /* unused */
/* TODO: Assume that the beacon id == mac context id */
@@ -1047,3 +1097,28 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
rate);
return 0;
}
+
+static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u16 *id = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->id == *id)
+ ieee80211_beacon_loss(vif);
+}
+
+int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data;
+ u16 id = (u16)le32_to_cpu(missed_beacons->mac_id);
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_beacon_loss_iterator,
+ &id);
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index a5eb8c82f16a..f19baf0dea6b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -81,12 +81,12 @@
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
{
.max = 1,
- .types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ .types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
},
{
@@ -127,6 +127,17 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
};
#endif
+static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
+{
+ int i;
+
+ memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ mvm->phy_ctxts[i].id = i;
+ mvm->phy_ctxts[i].ref = 0;
+ }
+}
+
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -141,7 +152,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TIMING_BEACON_ONLY;
+ IEEE80211_HW_TIMING_BEACON_ONLY |
+ IEEE80211_HW_CONNECTION_MONITOR;
hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -158,7 +170,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
- hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt);
+ hw->chanctx_data_size = sizeof(u16);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -193,6 +205,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_addresses++;
}
+ iwl_mvm_reset_phy_ctxts(mvm);
+
/* we create the 802.11 header and a max-length SSID element */
hw->wiphy->max_scan_ie_len =
mvm->fw->ucode_capa.max_probe_length - 24 - 34;
@@ -222,20 +236,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->trans->ops->d3_suspend &&
mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
- WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_RFKILL_RELEASE;
+ mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE;
if (!iwlwifi_mod_params.sw_crypto)
- hw->wiphy->wowlan.flags |=
- WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
- WIPHY_WOWLAN_GTK_REKEY_FAILURE |
- WIPHY_WOWLAN_4WAY_HANDSHAKE;
-
- hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
- hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
- hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
- hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
+ mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+ mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+ mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+ mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
+ hw->wiphy->wowlan = &mvm->wowlan;
}
#endif
@@ -243,7 +257,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
return ret;
- return ieee80211_register_hw(mvm->hw);
+ ret = ieee80211_register_hw(mvm->hw);
+ if (ret)
+ iwl_mvm_leds_exit(mvm);
+
+ return ret;
}
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -252,8 +270,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- if (test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status)) {
- IWL_DEBUG_DROP(mvm, "Dropping - RF KILL\n");
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
goto drop;
}
@@ -345,8 +363,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
spin_unlock_bh(&mvm->time_event_lock);
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvmvif->phy_ctxt = NULL;
+ mvmvif->phy_ctxt = NULL;
}
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
@@ -363,12 +380,16 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_cleanup_iterator, mvm);
+ mvm->p2p_device_vif = NULL;
+
+ iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
ieee80211_wake_queues(mvm->hw);
mvm->vif_count = 0;
+ mvm->rx_ba_sessions = 0;
}
static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
@@ -456,6 +477,20 @@ static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
iwl_mvm_power_update_mode(mvm, vif);
}
+static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
+{
+ u16 i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < NUM_PHY_CTX; i++)
+ if (!mvm->phy_ctxts[i].ref)
+ return &mvm->phy_ctxts[i];
+
+ IWL_ERR(mvm, "No available PHY context\n");
+ return NULL;
+}
+
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -477,6 +512,27 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
/*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on single MAC.
+ * If new interface added, disable PM on existing interface.
+	 * P2P device is a special case, since it is handled by FW similarly to
+	 * scan. If a P2P device is added, PM remains enabled on the existing
+ * interface.
+ * Note: the method below does not count the new interface being added
+ * at this moment.
+ */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ mvm->vif_count++;
+ if (mvm->vif_count > 1) {
+ IWL_DEBUG_MAC80211(mvm,
+ "Disable power on existing interfaces\n");
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_pm_disable_iterator, mvm);
+ }
+
+ /*
* The AP binding flow can be done only after the beacon
* template is configured (which happens only in the mac80211
* start_ap() flow), and adding the broadcast station can happen
@@ -499,27 +555,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
}
- /*
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single MAC.
- * If new interface added, disable PM on existing interface.
- * P2P device is a special case, since it is handled by FW similary to
- * scan. If P2P deviced is added, PM remains enabled on existing
- * interface.
- * Note: the method below does not count the new interface being added
- * at this moment.
- */
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvm->vif_count++;
- if (mvm->vif_count > 1) {
- IWL_DEBUG_MAC80211(mvm,
- "Disable power on existing interfaces\n");
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_pm_disable_iterator, mvm);
- }
-
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
goto out_release;
@@ -530,32 +565,34 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
*/
iwl_mvm_power_update_mode(mvm, vif);
+ /* beacon filtering */
+ if (!mvm->bf_allowed_vif &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p){
+ mvm->bf_allowed_vif = mvmvif;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ }
+
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ if (ret)
+ goto out_release;
+
/*
* P2P_DEVICE interface does not have a channel context assigned to it,
* so a dedicated PHY context is allocated to it and the corresponding
* MAC context is bound to it at this stage.
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- struct ieee80211_channel *chan;
- struct cfg80211_chan_def chandef;
- mvmvif->phy_ctxt = &mvm->phy_ctxt_roc;
-
- /*
- * The channel used here isn't relevant as it's
- * going to be overwritten as part of the ROC flow.
- * For now use the first channel we have.
- */
- chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
- cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
- ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt,
- &chandef, 1, 1);
- if (ret)
+ mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!mvmvif->phy_ctxt) {
+ ret = -ENOSPC;
goto out_remove_mac;
+ }
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
ret = iwl_mvm_binding_add_vif(mvm, vif);
if (ret)
- goto out_remove_phy;
+ goto out_unref_phy;
ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
if (ret)
@@ -571,27 +608,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
out_unbind:
iwl_mvm_binding_remove_vif(mvm, vif);
- out_remove_phy:
- iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+ out_unref_phy:
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
out_remove_mac:
mvmvif->phy_ctxt = NULL;
iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
- /*
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single MAC.
- * Check if only one additional interface remains after releasing
- * current one. Update power mode on the remaining interface.
- */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count == 1) {
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
- }
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_update_iterator, mvm);
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -629,8 +656,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
* By now, all the AC queues are empty. The AGG queues are
* empty too. We already got all the Tx responses for all the
* packets in the queues. The drain work can have been
- * triggered. Flush it. This work item takes the mutex, so kill
- * it before we take it.
+ * triggered. Flush it.
*/
flush_work(&mvm->sta_drained_wk);
}
@@ -646,6 +672,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ }
+
iwl_mvm_vif_dbgfs_clean(mvm, vif);
/*
@@ -661,7 +692,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = NULL;
iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
iwl_mvm_binding_remove_vif(mvm, vif);
- iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
mvmvif->phy_ctxt = NULL;
}
@@ -748,7 +779,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
}
- } else if (changes & BSS_CHANGED_DTIM_PERIOD) {
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ } else if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
* remove the session protection.
@@ -756,19 +790,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
} else if (changes & BSS_CHANGED_PS) {
- /*
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single
- * MAC. Avoid power mode update if more than one interface
- * is active.
- */
- IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count == 1) {
- ret = iwl_mvm_power_update_mode(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update power mode\n");
- }
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
}
}
@@ -987,6 +1011,21 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
+ /*
+ * Firmware bug - it'll crash if the beacon interval is less
+ * than 16. We can't avoid connecting at all, so refuse the
+		 * station state change; this will cause mac80211 to abandon
+ * attempts to connect to this AP, and eventually wpa_s will
+ * blacklist the AP...
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ vif->bss_conf.beacon_int < 16) {
+ IWL_ERR(mvm,
+ "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
+ sta->addr, vif->bss_conf.beacon_int);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
ret = iwl_mvm_add_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
@@ -999,9 +1038,13 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
mvmvif->phy_ctxt->channel->band);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
+ /* enable beacon filtering */
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
+ /* disable beacon filtering */
+ WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif));
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
@@ -1015,6 +1058,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else {
ret = -EIO;
}
+ out_unlock:
mutex_unlock(&mvm->mutex);
return ret;
@@ -1167,29 +1211,107 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
enum ieee80211_roc_type type)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct cfg80211_chan_def chandef;
- int ret;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ int ret, i;
+
+ IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
+ duration, type);
if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type);
return -EINVAL;
}
- IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
- duration, type);
-
mutex_lock(&mvm->mutex);
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ phy_ctxt = &mvm->phy_ctxts[i];
+ if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
+ continue;
+
+ if (phy_ctxt->ref && channel == phy_ctxt->channel) {
+ /*
+ * Unbind the P2P_DEVICE from the current PHY context,
+ * and if the PHY context is not used remove it.
+ */
+ ret = iwl_mvm_binding_remove_vif(mvm, vif);
+ if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
+ goto out_unlock;
+
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+
+ /* Bind the P2P_DEVICE to the current PHY Context */
+ mvmvif->phy_ctxt = phy_ctxt;
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+ goto out_unlock;
+
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+ goto schedule_time_event;
+ }
+ }
+
+ /* Need to update the PHY context only if the ROC channel changed */
+ if (channel == mvmvif->phy_ctxt->channel)
+ goto schedule_time_event;
+
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
- ret = iwl_mvm_phy_ctxt_changed(mvm, &mvm->phy_ctxt_roc,
- &chandef, 1, 1);
+ /*
+ * Change the PHY context configuration as it is currently referenced
+ * only by the P2P Device MAC
+ */
+ if (mvmvif->phy_ctxt->ref == 1) {
+ ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
+ &chandef, 1, 1);
+ if (ret)
+ goto out_unlock;
+ } else {
+ /*
+ * The PHY context is shared with other MACs. Need to remove the
+		 * P2P Device from the binding, allocate a new PHY context and
+ * create a new binding
+ */
+ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!phy_ctxt) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
+ 1, 1);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to change PHY context\n");
+ goto out_unlock;
+ }
+
+ /* Unbind the P2P_DEVICE from the current PHY context */
+ ret = iwl_mvm_binding_remove_vif(mvm, vif);
+ if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
+ goto out_unlock;
+
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+
+ /* Bind the P2P_DEVICE to the new allocated PHY context */
+ mvmvif->phy_ctxt = phy_ctxt;
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+ goto out_unlock;
+
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+ }
+
+schedule_time_event:
/* Schedule the time events */
ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
+out_unlock:
mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");
-
return ret;
}
@@ -1211,15 +1333,30 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
int ret;
+ IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
+
mutex_lock(&mvm->mutex);
+ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!phy_ctxt) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to add PHY context\n");
+ goto out;
+ }
- IWL_DEBUG_MAC80211(mvm, "Add PHY context\n");
- ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def,
- ctx->rx_chains_static,
- ctx->rx_chains_dynamic);
+ iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
+ *phy_ctxt_id = phy_ctxt->id;
+out:
mutex_unlock(&mvm->mutex);
return ret;
}
@@ -1228,10 +1365,11 @@ static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
mutex_lock(&mvm->mutex);
- iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt);
+ iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
mutex_unlock(&mvm->mutex);
}
@@ -1240,7 +1378,16 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
u32 changed)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+
+ if (WARN_ONCE((phy_ctxt->ref > 1) &&
+ (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
+ IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
+ IEEE80211_CHANCTX_CHANGE_RADAR)),
+ "Cannot change PHY. Ref=%d, changed=0x%X\n",
+ phy_ctxt->ref, changed))
+ return;
mutex_lock(&mvm->mutex);
iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
@@ -1254,13 +1401,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv;
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
mutex_lock(&mvm->mutex);
- mvmvif->phy_ctxt = phyctx;
+ mvmvif->phy_ctxt = phy_ctxt;
switch (vif->type) {
case NL80211_IFTYPE_AP:
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 9f46b23801bc..420e82d379d9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -73,7 +73,6 @@
#include "iwl-trans.h"
#include "iwl-notif-wait.h"
#include "iwl-eeprom-parse.h"
-#include "iwl-test.h"
#include "iwl-trans.h"
#include "sta.h"
#include "fw-api.h"
@@ -88,6 +87,7 @@ enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_BE,
IWL_MVM_TX_FIFO_VI,
IWL_MVM_TX_FIFO_VO,
+ IWL_MVM_TX_FIFO_MCAST = 5,
};
extern struct ieee80211_ops iwl_mvm_hw_ops;
@@ -109,6 +109,7 @@ extern struct iwl_mvm_mod_params iwlmvm_mod_params;
struct iwl_mvm_phy_ctxt {
u16 id;
u16 color;
+ u32 ref;
/*
* TODO: This should probably be removed. Currently here only for rate
@@ -149,6 +150,64 @@ enum iwl_power_scheme {
#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+enum iwl_dbgfs_pm_mask {
+ MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
+ MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
+ MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
+ MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
+ MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
+ MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
+ MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
+ MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
+};
+
+struct iwl_dbgfs_pm {
+ u8 keep_alive_seconds;
+ u32 rx_data_timeout;
+ u32 tx_data_timeout;
+ bool skip_over_dtim;
+ u8 skip_dtim_periods;
+ bool disable_power_off;
+ bool lprx_ena;
+ u32 lprx_rssi_threshold;
+ int mask;
+};
+
+/* beacon filtering */
+
+enum iwl_dbgfs_bf_mask {
+ MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
+ MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
+ MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
+ MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3),
+ MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4),
+ MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5),
+ MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6),
+ MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7),
+ MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8),
+};
+
+struct iwl_dbgfs_bf {
+ u8 bf_energy_delta;
+ u8 bf_roaming_energy_delta;
+ u8 bf_roaming_state;
+ u8 bf_temperature_delta;
+ u8 bf_enable_beacon_filter;
+ u8 bf_debug_flag;
+ u32 bf_escape_timer;
+ u32 ba_escape_timer;
+ u8 ba_enable_beacon_abort;
+ int mask;
+};
+#endif
+
+enum iwl_mvm_smps_type_request {
+ IWL_MVM_SMPS_REQ_BT_COEX,
+ IWL_MVM_SMPS_REQ_TT,
+ NUM_IWL_MVM_SMPS_REQ,
+};
+
/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
* @id: between 0 and 3
@@ -163,6 +222,8 @@ enum iwl_power_scheme {
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
* @beacon_skb: the skb used to hold the AP/GO beacon template
+ * @smps_requests: the SMPS requests of the different parts of the driver,
+ *	regarding the desired SMPS mode.
*/
struct iwl_mvm_vif {
u16 id;
@@ -172,6 +233,8 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_active;
bool monitor_active;
+ /* indicate whether beacon filtering is enabled */
+ bool bf_enabled;
u32 ap_beacon_time;
@@ -214,7 +277,11 @@ struct iwl_mvm_vif {
struct dentry *dbgfs_dir;
struct dentry *dbgfs_slink;
void *dbgfs_data;
+ struct iwl_dbgfs_pm dbgfs_pm;
+ struct iwl_dbgfs_bf dbgfs_bf;
#endif
+
+ enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
};
static inline struct iwl_mvm_vif *
@@ -223,12 +290,6 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
return (void *)vif->drv_priv;
}
-enum iwl_mvm_status {
- IWL_MVM_STATUS_HW_RFKILL,
- IWL_MVM_STATUS_ROC_RUNNING,
- IWL_MVM_STATUS_IN_HW_RESTART,
-};
-
enum iwl_scan_status {
IWL_MVM_SCAN_NONE,
IWL_MVM_SCAN_OS,
@@ -246,6 +307,65 @@ struct iwl_nvm_section {
const u8 *data;
};
+/*
+ * Tx-backoff threshold
+ * @temperature: The threshold in Celsius
+ * @backoff: The tx-backoff in uSec
+ */
+struct iwl_tt_tx_backoff {
+ s32 temperature;
+ u32 backoff;
+};
+
+#define TT_TX_BACKOFF_SIZE 6
+
+/**
+ * struct iwl_tt_params - thermal throttling parameters
+ * @ct_kill_entry: CT Kill entry threshold
+ * @ct_kill_exit: CT Kill exit threshold
+ * @ct_kill_duration: The time intervals (in uSec) in which the driver needs
+ *	to check whether to exit CT Kill.
+ * @dynamic_smps_entry: Dynamic SMPS entry threshold
+ * @dynamic_smps_exit: Dynamic SMPS exit threshold
+ * @tx_protection_entry: TX protection entry threshold
+ * @tx_protection_exit: TX protection exit threshold
+ * @tx_backoff: Array of thresholds for tx-backoff, in ascending order.
+ * @support_ct_kill: Support CT Kill?
+ * @support_dynamic_smps: Support dynamic SMPS?
+ * @support_tx_protection: Support tx protection?
+ * @support_tx_backoff: Support tx-backoff?
+ */
+struct iwl_tt_params {
+ s32 ct_kill_entry;
+ s32 ct_kill_exit;
+ u32 ct_kill_duration;
+ s32 dynamic_smps_entry;
+ s32 dynamic_smps_exit;
+ s32 tx_protection_entry;
+ s32 tx_protection_exit;
+ struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
+ bool support_ct_kill;
+ bool support_dynamic_smps;
+ bool support_tx_protection;
+ bool support_tx_backoff;
+};
+
+/**
+ * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
+ * @ct_kill_exit: worker to exit thermal kill
+ * @dynamic_smps: Is dynamic SMPS enabled due to thermal throttling?
+ * @tx_backoff: The current thermal throttling tx backoff in uSec.
+ * @params: Parameters to configure the thermal throttling algorithm.
+ * @throttle: Is thermal throttling active?
+ */
+struct iwl_mvm_tt_mgmt {
+ struct delayed_work ct_kill_exit;
+ bool dynamic_smps;
+ u32 tx_backoff;
+ const struct iwl_tt_params *params;
+ bool throttle;
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -266,6 +386,12 @@ struct iwl_mvm {
unsigned long status;
+ /*
+ * for beacon filtering -
+ * currently only one interface can be supported
+ */
+ struct iwl_mvm_vif *bf_allowed_vif;
+
enum iwl_ucode_type cur_ucode;
bool ucode_loaded;
bool init_ucode_run;
@@ -293,6 +419,7 @@ struct iwl_mvm {
struct work_struct sta_drained_wk;
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
atomic_t pending_frames[IWL_MVM_STATION_COUNT];
+ u8 rx_ba_sessions;
/* configured by mac80211 */
u32 rts_threshold;
@@ -313,7 +440,7 @@ struct iwl_mvm {
bool prevent_power_down_d3;
#endif
- struct iwl_mvm_phy_ctxt phy_ctxt_roc;
+ struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
struct list_head time_event_list;
spinlock_t time_event_lock;
@@ -337,12 +464,24 @@ struct iwl_mvm {
struct ieee80211_vif *p2p_device_vif;
#ifdef CONFIG_PM_SLEEP
+ struct wiphy_wowlan_support wowlan;
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
+ bool d3_test_active;
+ bool store_d3_resume_sram;
+ void *d3_resume_sram;
+ u32 d3_test_pme_ptr;
+#endif
#endif
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
+
+ /* Thermal Throttling and CTkill */
+ struct iwl_mvm_tt_mgmt thermal_throttle;
+ s32 temperature; /* Celsius */
};
/* Extract MVM priv from op_mode and _hw */
@@ -352,6 +491,19 @@ struct iwl_mvm {
#define IWL_MAC80211_GET_MVM(_hw) \
IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
+enum iwl_mvm_status {
+ IWL_MVM_STATUS_HW_RFKILL,
+ IWL_MVM_STATUS_HW_CTKILL,
+ IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_IN_HW_RESTART,
+};
+
+static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
+{
+ return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
+ test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+}
+
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@@ -443,8 +595,10 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic);
-void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm,
- struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
/* MAC (virtual interface) programming */
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -459,6 +613,9 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -523,6 +680,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
struct inet6_dev *idev);
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
+extern const struct file_operations iwl_dbgfs_d3_test_ops;
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -534,4 +692,31 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event);
void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+/* beacon filtering */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd);
+#else
+static inline void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{}
+#endif
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/* SMPS */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_smps_type_request req_type,
+ enum ieee80211_smps_mode smps_request);
+
+/* Thermal management and CT-kill */
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
+void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
+
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index b8ec02f89acc..edb94ea31654 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -60,6 +60,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
+#include <linux/firmware.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "iwl-eeprom-parse.h"
@@ -75,31 +76,56 @@ static const int nvm_to_read[] = {
};
/* Default NVM size to read */
-#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024);
+#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
+#define IWL_MAX_NVM_SECTION_SIZE 6000
-static inline void iwl_nvm_fill_read(struct iwl_nvm_access_cmd *cmd,
- u16 offset, u16 length, u16 section)
+#define NVM_WRITE_OPCODE 1
+#define NVM_READ_OPCODE 0
+
+/*
+ * prepare the NVM host command w/ the pointers to the nvm buffer
+ * and send it to fw
+ */
+static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
+ u16 offset, u16 length, const u8 *data)
{
- cmd->offset = cpu_to_le16(offset);
- cmd->length = cpu_to_le16(length);
- cmd->type = cpu_to_le16(section);
+ struct iwl_nvm_access_cmd nvm_access_cmd = {
+ .offset = cpu_to_le16(offset),
+ .length = cpu_to_le16(length),
+ .type = cpu_to_le16(section),
+ .op_code = NVM_WRITE_OPCODE,
+ };
+ struct iwl_host_cmd cmd = {
+ .id = NVM_ACCESS_CMD,
+ .len = { sizeof(struct iwl_nvm_access_cmd), length },
+ .flags = CMD_SYNC | CMD_SEND_IN_RFKILL,
+ .data = { &nvm_access_cmd, data },
+ /* data may come from vmalloc, so use _DUP */
+ .dataflags = { 0, IWL_HCMD_DFL_DUP },
+ };
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
}
static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
u16 offset, u16 length, u8 *data)
{
- struct iwl_nvm_access_cmd nvm_access_cmd = {};
+ struct iwl_nvm_access_cmd nvm_access_cmd = {
+ .offset = cpu_to_le16(offset),
+ .length = cpu_to_le16(length),
+ .type = cpu_to_le16(section),
+ .op_code = NVM_READ_OPCODE,
+ };
struct iwl_nvm_access_resp *nvm_resp;
struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = NVM_ACCESS_CMD,
- .flags = CMD_SYNC | CMD_WANT_SKB,
+ .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
.data = { &nvm_access_cmd, },
};
int ret, bytes_read, offset_read;
u8 *resp_data;
- iwl_nvm_fill_read(&nvm_access_cmd, offset, length, section);
cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -144,6 +170,30 @@ exit:
return ret;
}
+static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
+ const u8 *data, u16 length)
+{
+ int offset = 0;
+
+ /* copy data in chunks of 2k (and remainder if any) */
+
+ while (offset < length) {
+ int chunk_size, ret;
+
+ chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
+ length - offset);
+
+ ret = iwl_nvm_write_chunk(mvm, section, offset,
+ chunk_size, data + offset);
+ if (ret < 0)
+ return ret;
+
+ offset += chunk_size;
+ }
+
+ return 0;
+}
+
/*
* Reads an NVM section completely.
* NICs prior to 7000 family doesn't have a real NVM, but just read
@@ -177,7 +227,8 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
offset += ret;
}
- IWL_INFO(mvm, "NVM section %d read completed\n", section);
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM section %d read completed\n", section);
return offset;
}
@@ -200,7 +251,130 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
- return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib);
+ return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
+ iwl_fw_valid_tx_ant(mvm->fw),
+ iwl_fw_valid_rx_ant(mvm->fw));
+}
+
+#define MAX_NVM_FILE_LEN 16384
+
+/*
+ * HOW TO CREATE THE NVM FILE FORMAT:
+ * ------------------------------
+ * 1. create hex file, format:
+ * 3800 -> header
+ * 0000 -> header
+ * 5a40 -> data
+ *
+ * rev - 6 bit (word1)
+ * len - 10 bit (word1)
+ * id - 4 bit (word2)
+ * rsv - 12 bit (word2)
+ *
+ * 2. byte-swap each 16-bit word (flip the two 8-bit halves per line) to get
+ * the right NVM file format
+ *
+ * 3. create binary file from the hex file
+ *
+ * 4. save as "iNVM_xxx.bin" under /lib/firmware
+ */
+static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
+{
+ int ret, section_id, section_size;
+ const struct firmware *fw_entry;
+ const struct {
+ __le16 word1;
+ __le16 word2;
+ u8 data[];
+ } *file_sec;
+ const u8 *eof;
+
+#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
+#define NVM_WORD2_ID(x) (x >> 12)
+
+ /*
+ * Obtain NVM image via request_firmware. Since we already used
+ * request_firmware_nowait() for the firmware binary load and only
+ * get here after that we assume the NVM request can be satisfied
+ * synchronously.
+ */
+ ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file,
+ mvm->trans->dev);
+ if (ret) {
+ IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
+ iwlwifi_mod_params.nvm_file, ret);
+ return ret;
+ }
+
+ IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
+ iwlwifi_mod_params.nvm_file, fw_entry->size);
+
+ if (fw_entry->size < sizeof(*file_sec)) {
+ IWL_ERR(mvm, "NVM file too small\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (fw_entry->size > MAX_NVM_FILE_LEN) {
+ IWL_ERR(mvm, "NVM file too large\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ eof = fw_entry->data + fw_entry->size;
+
+ file_sec = (void *)fw_entry->data;
+
+ while (true) {
+ if (file_sec->data > eof) {
+ IWL_ERR(mvm,
+ "ERROR - NVM file too short for section header\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* check for EOF marker */
+ if (!file_sec->word1 && !file_sec->word2) {
+ ret = 0;
+ break;
+ }
+
+ section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
+ section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+
+ if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
+ IWL_ERR(mvm, "ERROR - section too large (%d)\n",
+ section_size);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!section_size) {
+ IWL_ERR(mvm, "ERROR - section empty\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (file_sec->data + section_size > eof) {
+ IWL_ERR(mvm,
+ "ERROR - NVM file too short for section (%d bytes)\n",
+ section_size);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = iwl_nvm_write_section(mvm, section_id, file_sec->data,
+ section_size);
+ if (ret < 0) {
+ IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+ break;
+ }
+
+ /* advance to the next section */
+ file_sec = (void *)(file_sec->data + section_size);
+ }
+out:
+ release_firmware(fw_entry);
+ return ret;
}
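A hedged worked example of the section-header decode described in the comment above; the two header words are invented, not taken from a real NVM image.

/* Example only: decode one hypothetical section header. */
static void example_decode_nvm_header(void)
{
	u16 word1 = 0x0038;	/* rev = 0, len field = 0x38 = 56 */
	u16 word2 = 0x1000;	/* id = 1, rsv = 0                */
	int size = 2 * NVM_WORD1_LEN(word1);	/* 2 * 8 * 56 = 896 bytes */
	int id = NVM_WORD2_ID(word2);		/* 0x1000 >> 12 = 1       */

	pr_debug("section id %d, %d bytes\n", id, size);
}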
int iwl_nvm_init(struct iwl_mvm *mvm)
@@ -208,6 +382,17 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
int ret, i, section;
u8 *nvm_buffer, *temp;
+ /* load external NVM if configured */
+ if (iwlwifi_mod_params.nvm_file) {
+ /* move to External NVM flow */
+ ret = iwl_mvm_load_external_nvm(mvm);
+ if (ret)
+ return ret;
+ }
+
+ /* Read From FW NVM */
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
/* TODO: find correct NVM max size for a section */
nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
GFP_KERNEL);
@@ -231,8 +416,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
if (ret < 0)
return ret;
- ret = 0;
mvm->nvm_data = iwl_parse_nvm_sections(mvm);
+ if (!mvm->nvm_data)
+ return -ENODATA;
- return ret;
+ return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index b29c31a41594..af79a14063a9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -215,17 +215,22 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
+
+ RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
+ RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
+ RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
+
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
- RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
- RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
-
RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
+ RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
+ false),
+
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
};
#undef RX_HANDLER
@@ -288,11 +293,14 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(NET_DETECT_HOTSPOTS_CMD),
CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
CMD(CARD_STATE_NOTIFICATION),
+ CMD(MISSED_BEACONS_NOTIFICATION),
CMD(BT_COEX_PRIO_TABLE),
CMD(BT_COEX_PROT_ENV),
CMD(BT_PROFILE_NOTIFICATION),
CMD(BT_CONFIG),
CMD(MCAST_FILTER_CMD),
+ CMD(REPLY_BEACON_FILTERING_CMD),
+ CMD(REPLY_THERMAL_MNG_BACKOFF),
};
#undef CMD
@@ -393,10 +401,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_free;
+ iwl_mvm_tt_initialize(mvm);
+
mutex_lock(&mvm->mutex);
err = iwl_run_init_mvm_ucode(mvm, true);
mutex_unlock(&mvm->mutex);
- if (err && !iwlmvm_mod_params.init_dbg) {
+ /* returns 0 if successful, 1 if success but in rfkill */
+ if (err < 0 && !iwlmvm_mod_params.init_dbg) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
goto out_free;
}
@@ -439,10 +450,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_mvm_leds_exit(mvm);
+ iwl_mvm_tt_exit(mvm);
+
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
+ kfree(mvm->d3_resume_sram);
+#endif
+
iwl_trans_stop_hw(mvm->trans, true);
iwl_phy_db_free(mvm->phy_db);
@@ -589,6 +606,16 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
ieee80211_wake_queue(mvm->hw, mq);
}
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
+{
+ if (state)
+ set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+ else
+ clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
+}
+
static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -598,7 +625,7 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index a28a1d1f23eb..a8652ddd6bed 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -195,21 +195,6 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
return ret;
}
-
-struct phy_ctx_used_data {
- unsigned long used[BITS_TO_LONGS(NUM_PHY_CTX)];
-};
-
-static void iwl_mvm_phy_ctx_used_iter(struct ieee80211_hw *hw,
- struct ieee80211_chanctx_conf *ctx,
- void *_data)
-{
- struct phy_ctx_used_data *data = _data;
- struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
-
- __set_bit(phy_ctxt->id, data->used);
-}
-
/*
* Send a command to add a PHY context based on the current HW configuration.
*/
@@ -217,34 +202,28 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
- struct phy_ctx_used_data data = {
- .used = { },
- };
-
- /*
- * If this is a regular PHY context (not the ROC one)
- * skip the ROC PHY context's ID.
- */
- if (ctxt != &mvm->phy_ctxt_roc)
- __set_bit(mvm->phy_ctxt_roc.id, data.used);
+ int ret;
+ WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ ctxt->ref);
lockdep_assert_held(&mvm->mutex);
- ctxt->color++;
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- ieee80211_iter_chan_contexts_atomic(
- mvm->hw, iwl_mvm_phy_ctx_used_iter, &data);
+ ctxt->channel = chandef->chan;
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_ADD, 0);
- ctxt->id = find_first_zero_bit(data.used, NUM_PHY_CTX);
- if (WARN_ONCE(ctxt->id == NUM_PHY_CTX,
- "Failed to init PHY context - no free ID!\n"))
- return -EIO;
- }
+ return ret;
+}
- ctxt->channel = chandef->chan;
- return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
- chains_static, chains_dynamic,
- FW_CTXT_ACTION_ADD, 0);
+/*
+ * Update the number of references to the given PHY context. This is valid only
+ * in case the PHY context was already created, i.e., its reference count > 0.
+ */
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+ lockdep_assert_held(&mvm->mutex);
+ ctxt->ref++;
}
/*
@@ -264,23 +243,12 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
FW_CTXT_ACTION_MODIFY, 0);
}
-/*
- * Send a command to the FW to remove the given phy context.
- * Once the command is sent, regardless of success or failure, the context is
- * marked as invalid
- */
-void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
{
- struct iwl_phy_context_cmd cmd;
- int ret;
-
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, FW_CTXT_ACTION_REMOVE, 0);
- ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
- sizeof(struct iwl_phy_context_cmd),
- &cmd);
- if (ret)
- IWL_ERR(mvm, "Failed to send PHY remove: ctxt id=%d\n",
- ctxt->id);
+ if (WARN_ON_ONCE(!ctxt))
+ return;
+
+ ctxt->ref--;
}
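A plausible caller-side sketch (not taken from this patch) of the new reference-counting scheme: a context with ref == 0 is free; after iwl_mvm_phy_ctxt_add() succeeds the caller takes a reference with iwl_mvm_phy_ctxt_ref() and drops it with iwl_mvm_phy_ctxt_unref() when the channel context goes away. The helper name below is hypothetical.

/* Hypothetical helper: find an unused PHY context. */
static struct iwl_mvm_phy_ctxt *example_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < NUM_PHY_CTX; i++)
		if (!mvm->phy_ctxts[i].ref)
			return &mvm->phy_ctxts[i];

	return NULL;
}

/*
 * Sketch of the expected lifecycle (assignment / release):
 *
 *	ctxt = example_get_free_phy_ctxt(mvm);
 *	ret = iwl_mvm_phy_ctxt_add(mvm, ctxt, chandef, 1, 1);
 *	if (!ret)
 *		iwl_mvm_phy_ctxt_ref(mvm, ctxt);
 *	...
 *	iwl_mvm_phy_ctxt_unref(mvm, ctxt);
 */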
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index ed77e437aac4..e7ca965a89b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -75,6 +75,54 @@
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_beacon_filter_cmd *cmd)
+{
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC,
+ sizeof(struct iwl_beacon_filter_cmd), cmd);
+
+ if (!ret) {
+ IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+ cmd->ba_enable_beacon_abort);
+ IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+ cmd->ba_escape_timer);
+ IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+ cmd->bf_debug_flag);
+ IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+ cmd->bf_enable_beacon_filter);
+ IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+ cmd->bf_energy_delta);
+ IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+ cmd->bf_escape_timer);
+ IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+ cmd->bf_roaming_energy_delta);
+ IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+ cmd->bf_roaming_state);
+ IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n",
+ cmd->bf_temperature_delta);
+ }
+ return ret;
+}
+
+static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = 1,
+ .ba_enable_beacon_abort = enable,
+ };
+
+ if (!mvmvif->bf_enabled)
+ return 0;
+
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+ return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+}
+
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
struct iwl_powertable_cmd *cmd)
{
@@ -89,8 +137,12 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
le32_to_cpu(cmd->rx_data_timeout));
IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
le32_to_cpu(cmd->tx_data_timeout));
- IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
- cmd->lprx_rssi_threshold);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+ IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+ le32_to_cpu(cmd->skip_dtim_periods));
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+ le32_to_cpu(cmd->lprx_rssi_threshold));
}
}
@@ -103,6 +155,8 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int dtimper, dtimper_msec;
int keep_alive;
bool radar_detect = false;
+ struct iwl_mvm_vif *mvmvif __maybe_unused =
+ iwl_mvm_vif_from_mac80211(vif);
/*
* Regardless of power management state the driver must set
@@ -115,12 +169,27 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+ if (!vif->bss_conf.assoc)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+ mvmvif->dbgfs_pm.disable_power_off)
+ cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
if (!vif->bss_conf.ps)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+ if (vif->bss_conf.beacon_rate &&
+ (vif->bss_conf.beacon_rate->bitrate == 10 ||
+ vif->bss_conf.beacon_rate->bitrate == 60)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ cmd->lprx_rssi_threshold =
+ cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
+ }
+
dtimper = hw->conf.ps_dtim_period ?: 1;
/* Check if radar detection is required on current channel */
@@ -135,8 +204,11 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* Check skip over DTIM conditions */
if (!radar_detect && (dtimper <= 10) &&
- (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP))
+ (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
+ mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd->skip_dtim_periods = cpu_to_le32(3);
+ }
/* Check that keep alive period is at least 3 * DTIM */
dtimper_msec = dtimper * vif->bss_conf.beacon_int;
@@ -145,27 +217,85 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = keep_alive;
- cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+ if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
+ cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+ cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+ } else {
+ cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+ cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
+ cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+ if (mvmvif->dbgfs_pm.skip_over_dtim)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ else
+ cmd->flags &=
+ cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
+ cmd->rx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
+ cmd->tx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
+ cmd->skip_dtim_periods =
+ cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
+ if (mvmvif->dbgfs_pm.lprx_ena)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ else
+ cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
+ cmd->lprx_rssi_threshold =
+ cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
+ int ret;
+ bool ba_enable;
struct iwl_powertable_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
+ /*
+ * TODO: The following vif_count verification is temporary condition.
+ * Avoid power mode update if more than one interface is currently
+ * active. Remove this condition when FW will support power management
+ * on multiple MACs.
+ */
+ IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
+ mvm->vif_count);
+ if (mvm->vif_count > 1)
+ return 0;
+
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ ba_enable = !!(cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
+
+ return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
}
int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_powertable_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif __maybe_unused =
+ iwl_mvm_vif_from_mac80211(vif);
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
@@ -173,8 +303,82 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+ mvmvif->dbgfs_pm.disable_power_off)
+ cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
iwl_mvm_power_log(mvm, &cmd);
return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
sizeof(cmd), &cmd);
}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
+ cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta;
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
+ cmd->bf_roaming_energy_delta =
+ dbgfs_bf->bf_roaming_energy_delta;
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
+ cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state;
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA)
+ cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta;
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
+ cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag;
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
+ cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
+ cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
+ cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort;
+}
+#endif
+
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = 1,
+ };
+ int ret;
+
+ if (mvmvif != mvm->bf_allowed_vif ||
+ vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+
+ if (!ret)
+ mvmvif->bf_enabled = true;
+
+ return ret;
+}
+
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_beacon_filter_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+
+ if (!ret)
+ mvmvif->bf_enabled = false;
+
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index a1e3e923ea3e..29d49cf0fdb2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -169,27 +169,34 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
num_active_bindings++;
}
- if (!num_active_bindings)
- goto send_cmd;
-
- quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
- quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+ quota = 0;
+ quota_rem = 0;
+ if (num_active_bindings) {
+ quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
+ quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+ }
for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
- if (data.n_interfaces[i] <= 0)
+ if (data.colors[i] < 0)
continue;
cmd.quotas[idx].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
- cmd.quotas[idx].quota = cpu_to_le32(quota);
- cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+
+ if (data.n_interfaces[i] <= 0) {
+ cmd.quotas[idx].quota = cpu_to_le32(0);
+ cmd.quotas[idx].max_duration = cpu_to_le32(0);
+ } else {
+ cmd.quotas[idx].quota = cpu_to_le32(quota);
+ cmd.quotas[idx].max_duration =
+ cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ }
idx++;
}
/* Give the remainder of the session to the first binding */
le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
-send_cmd:
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
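A hedged arithmetic check of the split above, assuming IWL_MVM_MAX_QUOTA is 128 (that define is not shown in this hunk and is only assumed here):

/*
 *	num_active_bindings = 3
 *	quota     = 128 / 3 = 42
 *	quota_rem = 128 % 3 = 2
 *
 * Binding 0 receives 42 + 2 = 44 time units, bindings 1 and 2 receive 42
 * each (44 + 42 + 42 = 128), and a binding with no active interfaces now
 * gets an explicit quota of 0 instead of being skipped entirely.
 */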
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index b99fe3163866..b328a988c130 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -401,24 +401,29 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
load = rs_tl_get_load(lq_data, tid);
- if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
- IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
- sta->addr, tid);
- ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
- if (ret == -EAGAIN) {
- /*
- * driver and mac80211 is out of sync
- * this might be cause by reloading firmware
- * stop the tx ba session here
- */
- IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
- tid);
- ieee80211_stop_tx_ba_session(sta, tid);
- }
- } else {
- IWL_DEBUG_HT(mvm,
- "Aggregation not enabled for tid %d because load = %u\n",
- tid, load);
+ /*
+ * Don't create TX aggregation sessions when in high
+ * BT traffic, as they would just be disrupted by BT.
+ */
+ if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) {
+ IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n",
+ BT_MBOX_MSG(&mvm->last_bt_notif,
+ 3, TRAFFIC_LOAD));
+ return ret;
+ }
+
+ IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
+ sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+ /*
+ * driver and mac80211 are out of sync;
+ * this might be caused by reloading firmware;
+ * stop the tx ba session here
+ */
+ IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
+ tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
}
return ret;
}
@@ -1519,6 +1524,29 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
u8 update_search_tbl_counter = 0;
int ret;
+ switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+ /* nothing */
+ break;
+ case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+ /* avoid antenna B unless MIMO */
+ if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
+ tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
+ break;
+ case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+ case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+ /* avoid antenna B and MIMO */
+ valid_tx_ant =
+ first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+ if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
+ tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ break;
+ default:
+ IWL_ERR(mvm, "Invalid BT load %d",
+ BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
+ break;
+ }
+
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
@@ -1532,7 +1560,9 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
tx_chains_num <= 2))
break;
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
+ BT_MBOX_MSG(&mvm->last_bt_notif, 3,
+ TRAFFIC_LOAD) == 0)
break;
memcpy(search_tbl, tbl, sz);
@@ -1654,6 +1684,28 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
u8 update_search_tbl_counter = 0;
int ret;
+ switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+ /* nothing */
+ break;
+ case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+ case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+ /* avoid antenna B and MIMO */
+ if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
+ tbl->action = IWL_MIMO2_SWITCH_SISO_A;
+ break;
+ case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+ /* avoid antenna B unless MIMO */
+ if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
+ tbl->action == IWL_MIMO2_SWITCH_SISO_C)
+ tbl->action = IWL_MIMO2_SWITCH_SISO_A;
+ break;
+ default:
+ IWL_ERR(mvm, "Invalid BT load %d",
+ BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
+ break;
+ }
+
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
@@ -1791,6 +1843,28 @@ static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
int ret;
u8 update_search_tbl_counter = 0;
+ switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+ /* nothing */
+ break;
+ case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+ case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+ /* avoid antenna B and MIMO */
+ if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
+ tbl->action = IWL_MIMO3_SWITCH_SISO_A;
+ break;
+ case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+ /* avoid antenna B unless MIMO */
+ if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
+ tbl->action == IWL_MIMO3_SWITCH_SISO_C)
+ tbl->action = IWL_MIMO3_SWITCH_SISO_A;
+ break;
+ default:
+ IWL_ERR(mvm, "Invalid BT load %d",
+ BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
+ break;
+ }
+
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
@@ -2302,6 +2376,32 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
(current_tpt > (100 * tbl->expected_tpt[low]))))
scale_action = 0;
+ if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+ IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
+ (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+ if (lq_sta->last_bt_traffic >
+ BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ /*
+ * don't set scale_action, don't want to scale up if
+ * the rate scale doesn't otherwise think that is a
+ * good idea.
+ */
+ } else if (lq_sta->last_bt_traffic <=
+ BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ scale_action = -1;
+ }
+ }
+ lq_sta->last_bt_traffic =
+ BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
+
+ if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+ IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
+ (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+ /* search for a new modulation */
+ rs_stay_in_table(lq_sta, true);
+ goto lq_update;
+ }
+
switch (scale_action) {
case -1:
/* Decrease starting rate, update uCode's rate table */
@@ -2783,6 +2883,13 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
lq_cmd->agg_time_limit =
cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ /*
+ * overwrite if needed: pass the aggregation time limit
+ * to uCode in uSec. This is racy, but at least it helps...
+ */
+ if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2)
+ lq_cmd->agg_time_limit = cpu_to_le16(1200);
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -3081,3 +3188,29 @@ void iwl_mvm_rate_control_unregister(void)
{
ieee80211_rate_control_unregister(&rs_mvm_ops);
}
+
+/**
+ * iwl_mvm_tx_protection - Get the LQ command, change it to enable/disable
+ * Tx protection according to this request and previous requests,
+ * and send the LQ command.
+ * @lq: The LQ command
+ * @mvmsta: The station
+ * @enable: Enable Tx protection?
+ */
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+ struct iwl_mvm_sta *mvmsta, bool enable)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (enable) {
+ if (mvmsta->tx_protection == 0)
+ lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ mvmsta->tx_protection++;
+ } else {
+ mvmsta->tx_protection--;
+ if (mvmsta->tx_protection == 0)
+ lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ }
+
+ return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false);
+}
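The reference count above means the RTS/CTS flag only toggles on the 0 -> 1 and 1 -> 0 transitions; a short illustrative sequence (callers chosen for illustration, matching the two users added by this patch, aggregation and thermal throttling):

/*
 *	iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, mvmsta, true);
 *		agg start:   0 -> 1, LQ_FLAG_SET_STA_TLC_RTS_MSK set
 *	iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, mvmsta, true);
 *		thermal:     1 -> 2, flag stays set
 *	iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, mvmsta, false);
 *		agg stop:    2 -> 1, flag stays set
 *	iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, mvmsta, false);
 *		thermal:     1 -> 0, flag cleared
 *
 * Every call still (re)sends the LQ command with CMD_ASYNC.
 */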
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 219c6857cc0f..cff4f6da7733 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -358,6 +358,18 @@ struct iwl_lq_sta {
u8 last_bt_traffic;
};
+enum iwl_bt_coex_profile_traffic_load {
+ IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0,
+ IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1,
+ IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2,
+ IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3,
+/*
+ * There are no further values: even though this is stored in a u8, the
+ * indication from the BT device only has two bits.
+ */
+};
+
+
static inline u8 num_of_ant(u8 mask)
{
return !!((mask) & ANT_A) +
@@ -390,4 +402,9 @@ extern int iwl_mvm_rate_control_register(void);
*/
extern void iwl_mvm_rate_control_unregister(void);
+struct iwl_mvm_sta;
+
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+ struct iwl_mvm_sta *mvmsta, bool enable);
+
#endif /* __rs__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 4dfc21a3e83e..e4930d5027d2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -363,3 +363,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rxb, &rx_status);
return 0;
}
+
+/*
+ * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
+ *
+ * TODO: This handler is implemented partially.
+ * It only gets the NIC's temperature.
+ */
+int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_notif_statistics *stats = (void *)&pkt->data;
+ struct mvm_statistics_general_common *common = &stats->general.common;
+
+ if (mvm->temperature != le32_to_cpu(common->temperature)) {
+ mvm->temperature = le32_to_cpu(common->temperature);
+ iwl_mvm_tt_handler(mvm);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 2476e43799d5..acdff6b67e04 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -137,8 +137,8 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
{
int fw_idx, req_idx;
- fw_idx = 0;
- for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--) {
+ for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+ req_idx--, fw_idx++) {
cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
memcpy(cmd->direct_scan[fw_idx].ssid,
@@ -153,7 +153,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
* just to notify that this scan is active and not passive.
* In order to notify the FW of the number of SSIDs we wish to scan (including
* the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first).
+ * one for each SSID, and set the active bit (first). The first SSID is already
+ * included in the probe template, so we need to set only req->n_ssids - 1 bits
+ * in addition to the first bit.
*/
static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
{
@@ -176,19 +178,12 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
- __le32 chan_type_value;
-
- if (req->n_ssids > 0)
- chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
- else
- chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
+ chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- chan->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- chan->type = chan_type_value;
+ chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
chan->passive_dwell = cpu_to_le16(passive_dwell);
chan->iteration_count = cpu_to_le16(1);
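A worked example of the chan->type encoding above (illustrative, and assuming SCAN_CHANNEL_TYPE_ACTIVE is bit 0):

/*
 *	req->n_ssids == 3
 *	chan->type = BIT(3) - 1 = 0x7
 *		bit 0:    the active bit (the first SSID lives in the probe
 *		          request template)
 *		bits 1-2: the remaining n_ssids - 1 = 2 direct-scan SSIDs
 *	passive channel: 0x7 & ~SCAN_CHANNEL_TYPE_ACTIVE = 0x6
 */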
@@ -298,12 +293,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
else
cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
- /*
- * TODO: This is a WA due to a bug in the FW AUX framework that does not
- * properly handle time events that fail to be scheduled
- */
- cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
-
cmd->repeats = cpu_to_le32(1);
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 5c664ed54400..563f559b902d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -64,6 +64,7 @@
#include "mvm.h"
#include "sta.h"
+#include "rs.h"
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
{
@@ -217,6 +218,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvmvif->color);
mvm_sta->vif = vif;
mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ mvm_sta->tx_protection = 0;
+ mvm_sta->tt_tx_protection = false;
/* HW restart, don't assume the memory has been zeroed */
atomic_set(&mvm->pending_frames[sta_id], 0);
@@ -226,9 +229,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
- if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
- mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue);
-
/* for HW restart - need to reset the seq_number etc... */
memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
@@ -608,6 +608,8 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta)
return ret;
}
+#define IWL_MAX_RX_BA_SESSIONS 16
+
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
@@ -618,11 +620,20 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lockdep_assert_held(&mvm->mutex);
+ if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+ IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
+ return -ENOSPC;
+ }
+
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
- cmd.add_immediate_ba_tid = (u8) tid;
- cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ if (start) {
+ cmd.add_immediate_ba_tid = (u8) tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ } else {
+ cmd.remove_immediate_ba_tid = (u8) tid;
+ }
cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
STA_MODIFY_REMOVE_BA_TID;
@@ -648,6 +659,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
+ if (!ret) {
+ if (start)
+ mvm->rx_ba_sessions++;
+ else if (mvm->rx_ba_sessions > 0)
+ /* check that restart flow didn't zero the counter */
+ mvm->rx_ba_sessions--;
+ }
+
return ret;
}
@@ -798,21 +817,23 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
min(mvmsta->max_agg_bufsize, buf_size);
mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+ IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
+
if (mvm->cfg->ht_params->use_rts_for_aggregation) {
/*
* switch to RTS/CTS if it is the prefer protection
* method for HT traffic
+ * this function also sends the LQ command
*/
- mvmsta->lq_sta.lq.flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
+ mvmsta, true);
/*
* TODO: remove the TLC_RTS flag when we tear down the last
* AGG session (agg_tids_count in DVM)
*/
}
- IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
- sta->addr, tid);
-
return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false);
}
@@ -894,6 +915,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
u16 txq_id;
+ enum iwl_mvm_agg_state old_state;
/*
* First set the agg state to OFF to avoid calling
@@ -903,13 +925,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
txq_id = tid_data->txq_id;
IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
mvmsta->sta_id, tid, txq_id, tid_data->state);
+ old_state = tid_data->state;
tid_data->state = IWL_AGG_OFF;
spin_unlock_bh(&mvmsta->lock);
- if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
- IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ if (old_state >= IWL_AGG_ON) {
+ if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+
+ iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+ }
- iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
mvm->queue_to_mac80211[tid_data->txq_id] =
IWL_INVALID_MAC80211_QUEUE;
@@ -1287,17 +1313,11 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct iwl_mvm_add_sta_cmd cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
- .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
- .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE),
+ .station_flags_msk = cpu_to_le32(STA_FLG_PS),
.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
};
int ret;
- /*
- * Same modify mask for sleep_tx_count and sleep_state_flags but this
- * should be fine since if we set the STA as "awake", then
- * sleep_tx_count is not relevant.
- */
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index a4ddce77aaae..94b265eb32b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -250,7 +250,6 @@ enum iwl_mvm_agg_state {
* the first packet to be sent in legacy HW queue in Tx AGG stop flow.
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
- * @wait_for_ba: Expect block-ack before next Tx reply
*/
struct iwl_mvm_tid_data {
u16 seq_number;
@@ -260,7 +259,6 @@ struct iwl_mvm_tid_data {
enum iwl_mvm_agg_state state;
u16 txq_id;
u16 ssn;
- bool wait_for_ba;
};
/**
@@ -275,6 +273,8 @@ struct iwl_mvm_tid_data {
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
* and from Tx response flow, it needs a spinlock.
* @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ * @tx_protection: reference counter for controlling the Tx protection.
+ * @tt_tx_protection: is Tx protection enabled due to thermal throttling?
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -296,6 +296,10 @@ struct iwl_mvm_sta {
#ifdef CONFIG_PM_SLEEP
u16 last_seq_ctl;
#endif
+
+ /* Temporary, until the new TLC will control the Tx protection */
+ s8 tx_protection;
+ bool tt_tx_protection;
};
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
new file mode 100644
index 000000000000..d6ae7f16ac11
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -0,0 +1,530 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "mvm.h"
+#include "iwl-config.h"
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+
+#define OTP_DTS_DIODE_DEVIATION 96 /*in words*/
+/* VBG - Voltage Band Gap error data (temperature offset) */
+#define OTP_WP_DTS_VBG (OTP_DTS_DIODE_DEVIATION + 2)
+#define MEAS_VBG_MIN_VAL 2300
+#define MEAS_VBG_MAX_VAL 3000
+#define MEAS_VBG_DEFAULT_VAL 2700
+#define DTS_DIODE_VALID(flags) (flags & DTS_DIODE_REG_FLAGS_PASS_ONCE)
+#define MIN_TEMPERATURE 0
+#define MAX_TEMPERATURE 125
+#define TEMPERATURE_ERROR (MAX_TEMPERATURE + 1)
+#define PTAT_DIGITAL_VALUE_MIN_VALUE 0
+#define PTAT_DIGITAL_VALUE_MAX_VALUE 0xFF
+#define DTS_VREFS_NUM 5
+static inline u32 DTS_DIODE_GET_VREFS_ID(u32 flags)
+{
+ return (flags & DTS_DIODE_REG_FLAGS_VREFS_ID) >>
+ DTS_DIODE_REG_FLAGS_VREFS_ID_POS;
+}
+
+#define CALC_VREFS_MIN_DIFF 43
+#define CALC_VREFS_MAX_DIFF 51
+#define CALC_LUT_SIZE (1 + CALC_VREFS_MAX_DIFF - CALC_VREFS_MIN_DIFF)
+#define CALC_LUT_INDEX_OFFSET CALC_VREFS_MIN_DIFF
+#define CALC_TEMPERATURE_RESULT_SHIFT_OFFSET 23
+
+/*
+ * @digital_value: The diode's digital-value sampled (temperature/voltage)
+ * @vref_low: The lower voltage-reference (the vref just below the diode's
+ * sampled digital-value)
+ * @vref_high: The higher voltage-reference (the vref just above the diode's
+ * sampled digital-value)
+ * @flags: bits[1:0]: The ID of the Vrefs pair (lowVref,highVref)
+ * bits[6:2]: Reserved.
+ * bits[7:7]: Indicates completion of at least 1 successful sample
+ * since last DTS reset.
+ */
+struct iwl_mvm_dts_diode_bits {
+ u8 digital_value;
+ u8 vref_low;
+ u8 vref_high;
+ u8 flags;
+} __packed;
+
+union dts_diode_results {
+ u32 reg_value;
+ struct iwl_mvm_dts_diode_bits bits;
+} __packed;
+
+static s16 iwl_mvm_dts_get_volt_band_gap(struct iwl_mvm *mvm)
+{
+ struct iwl_nvm_section calib_sec;
+ const __le16 *calib;
+ u16 vbg;
+
+ /* TODO: move parsing to NVM code */
+ calib_sec = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION];
+ calib = (__le16 *)calib_sec.data;
+
+ vbg = le16_to_cpu(calib[OTP_WP_DTS_VBG]);
+
+ if (vbg < MEAS_VBG_MIN_VAL || vbg > MEAS_VBG_MAX_VAL)
+ vbg = MEAS_VBG_DEFAULT_VAL;
+
+ return vbg;
+}
+
+static u16 iwl_mvm_dts_get_ptat_deviation_offset(struct iwl_mvm *mvm)
+{
+ const u8 *calib;
+ u8 ptat, pa1, pa2, median;
+
+ /* TODO: move parsing to NVM code */
+ calib = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION].data;
+ ptat = calib[OTP_DTS_DIODE_DEVIATION];
+ pa1 = calib[OTP_DTS_DIODE_DEVIATION + 1];
+ pa2 = calib[OTP_DTS_DIODE_DEVIATION + 2];
+
+ /* get the median: */
+ if (ptat > pa1) {
+ if (ptat > pa2)
+ median = (pa1 > pa2) ? pa1 : pa2;
+ else
+ median = ptat;
+ } else {
+ if (pa1 > pa2)
+ median = (ptat > pa2) ? ptat : pa2;
+ else
+ median = pa1;
+ }
+
+ return ptat - median;
+}
+
+static u8 iwl_mvm_dts_calibrate_ptat_deviation(struct iwl_mvm *mvm, u8 value)
+{
+ /* Calibrate the PTAT digital value, based on PTAT deviation data: */
+ s16 new_val = value - iwl_mvm_dts_get_ptat_deviation_offset(mvm);
+
+ if (new_val > PTAT_DIGITAL_VALUE_MAX_VALUE)
+ new_val = PTAT_DIGITAL_VALUE_MAX_VALUE;
+ else if (new_val < PTAT_DIGITAL_VALUE_MIN_VALUE)
+ new_val = PTAT_DIGITAL_VALUE_MIN_VALUE;
+
+ return new_val;
+}
+
+static bool dts_get_adjacent_vrefs(struct iwl_mvm *mvm,
+ union dts_diode_results *avg_ptat)
+{
+ u8 vrefs_results[DTS_VREFS_NUM];
+ u8 low_vref_index = 0, flags;
+ u32 reg;
+
+ reg = iwl_read_prph(mvm->trans, DTSC_VREF_AVG);
+ memcpy(vrefs_results, &reg, sizeof(reg));
+ reg = iwl_read_prph(mvm->trans, DTSC_VREF5_AVG);
+ vrefs_results[4] = reg & 0xff;
+
+ if (avg_ptat->bits.digital_value < vrefs_results[0] ||
+ avg_ptat->bits.digital_value > vrefs_results[4])
+ return false;
+
+ if (avg_ptat->bits.digital_value > vrefs_results[3])
+ low_vref_index = 3;
+ else if (avg_ptat->bits.digital_value > vrefs_results[2])
+ low_vref_index = 2;
+ else if (avg_ptat->bits.digital_value > vrefs_results[1])
+ low_vref_index = 1;
+
+ avg_ptat->bits.vref_low = vrefs_results[low_vref_index];
+ avg_ptat->bits.vref_high = vrefs_results[low_vref_index + 1];
+ flags = avg_ptat->bits.flags;
+ avg_ptat->bits.flags =
+ (flags & ~DTS_DIODE_REG_FLAGS_VREFS_ID) |
+ (low_vref_index & DTS_DIODE_REG_FLAGS_VREFS_ID);
+ return true;
+}
+
+/*
+ * return true if the results are valid, and false otherwise.
+ */
+static bool dts_read_ptat_avg_results(struct iwl_mvm *mvm,
+ union dts_diode_results *avg_ptat)
+{
+ u32 reg;
+ u8 tmp;
+
+ /* fill the diode value and pass_once with avg-reg results */
+ reg = iwl_read_prph(mvm->trans, DTSC_PTAT_AVG);
+ reg &= DTS_DIODE_REG_DIG_VAL | DTS_DIODE_REG_PASS_ONCE;
+ avg_ptat->reg_value = reg;
+
+ /* calibrate the PTAT digital value */
+ tmp = avg_ptat->bits.digital_value;
+ tmp = iwl_mvm_dts_calibrate_ptat_deviation(mvm, tmp);
+ avg_ptat->bits.digital_value = tmp;
+
+ /*
+ * fill vrefs fields, based on the avgVrefs results
+ * and the diode value
+ */
+ return dts_get_adjacent_vrefs(mvm, avg_ptat) &&
+ DTS_DIODE_VALID(avg_ptat->bits.flags);
+}
+
+static s32 calculate_nic_temperature(union dts_diode_results avg_ptat,
+ u16 volt_band_gap)
+{
+ u32 tmp_result;
+ u8 vrefs_diff;
+ /*
+ * For temperature calculation (at the end, shift right by 23)
+ * LUT[(D2-D1)] = ROUND{ 2^23 / ((D2-D1)*9*10) }
+ * (D2-D1) == 43 44 45 46 47 48 49 50 51
+ */
+ static const u16 calc_lut[CALC_LUT_SIZE] = {
+ 2168, 2118, 2071, 2026, 1983, 1942, 1902, 1864, 1828,
+ };
+
+ /*
+ * The diff between the high and low voltage-references is assumed
+ * to be strictly within the range [60, 68]
+ */
+ vrefs_diff = avg_ptat.bits.vref_high - avg_ptat.bits.vref_low;
+
+ if (vrefs_diff < CALC_VREFS_MIN_DIFF ||
+ vrefs_diff > CALC_VREFS_MAX_DIFF)
+ return TEMPERATURE_ERROR;
+
+ /* calculate the result: */
+ tmp_result =
+ vrefs_diff * (DTS_DIODE_GET_VREFS_ID(avg_ptat.bits.flags) + 9);
+ tmp_result += avg_ptat.bits.digital_value;
+ tmp_result -= avg_ptat.bits.vref_high;
+
+ /* multiply by the LUT value (based on the diff) */
+ tmp_result *= calc_lut[vrefs_diff - CALC_LUT_INDEX_OFFSET];
+
+ /*
+ * Get the BandGap (the voltage references source) error data
+ * (temperature offset)
+ */
+ tmp_result *= volt_band_gap;
+
+ /*
+ * here, tmp_result value can be up to 32-bits. We want to right-shift
+ * it *without* sign-extend.
+ */
+ tmp_result = tmp_result >> CALC_TEMPERATURE_RESULT_SHIFT_OFFSET;
+
+ /*
+ * at this point, tmp_result should be in the range:
+ * 200 <= tmp_result <= 365
+ */
+ return (s16)tmp_result - 240;
+}
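A small standalone check (illustrative userspace C, not part of the driver) that regenerates calc_lut[] from the formula quoted in the comment above, ROUND(2^23 / ((D2-D1) * 9 * 10)) for D2-D1 in [43, 51]; it reproduces 2168, 2118, 2071, 2026, 1983, 1942, 1902, 1864, 1828.

#include <stdio.h>

int main(void)
{
	unsigned d;

	for (d = 43; d <= 51; d++) {
		unsigned den = d * 9 * 10;
		/* rounded integer division: ROUND(2^23 / den) */
		unsigned lut = (8388608u + den / 2) / den;

		printf("D2-D1=%u -> LUT=%u\n", d, lut);
	}
	return 0;
}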
+
+static s32 check_nic_temperature(struct iwl_mvm *mvm)
+{
+ u16 volt_band_gap;
+ union dts_diode_results avg_ptat;
+
+ volt_band_gap = iwl_mvm_dts_get_volt_band_gap(mvm);
+
+ /* disable DTS */
+ iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0);
+
+ /* SV initialization */
+ iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 1);
+ iwl_write_prph(mvm->trans, DTSC_CFG_MODE,
+ DTSC_CFG_MODE_PERIODIC);
+
+ /* wait for results */
+ msleep(100);
+ if (!dts_read_ptat_avg_results(mvm, &avg_ptat))
+ return TEMPERATURE_ERROR;
+
+ /* disable DTS */
+ iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0);
+
+ return calculate_nic_temperature(avg_ptat, volt_band_gap);
+}
+
+static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
+{
+ u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
+
+ IWL_ERR(mvm, "Enter CT Kill\n");
+ iwl_mvm_set_hw_ctkill_state(mvm, true);
+ schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
+ round_jiffies_relative(duration * HZ));
+}
+
+static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
+{
+ IWL_ERR(mvm, "Exit CT Kill\n");
+ iwl_mvm_set_hw_ctkill_state(mvm, false);
+}
+
+static void check_exit_ctkill(struct work_struct *work)
+{
+ struct iwl_mvm_tt_mgmt *tt;
+ struct iwl_mvm *mvm;
+ u32 duration;
+ s32 temp;
+
+ tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
+ mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
+
+ duration = tt->params->ct_kill_duration;
+
+ iwl_trans_start_hw(mvm->trans);
+ temp = check_nic_temperature(mvm);
+ iwl_trans_stop_hw(mvm->trans, false);
+
+ if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) {
+ IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n");
+ goto reschedule;
+ }
+ IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
+
+ if (temp <= tt->params->ct_kill_exit) {
+ iwl_mvm_exit_ctkill(mvm);
+ return;
+ }
+
+reschedule:
+ schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
+ round_jiffies(duration * HZ));
+}
+
+static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ enum ieee80211_smps_mode smps_mode;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->thermal_throttle.dynamic_smps)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode);
+}
+
+static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ int i, err;
+
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+ mvmsta = (void *)sta->drv_priv;
+ if (enable == mvmsta->tt_tx_protection)
+ continue;
+ err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
+ mvmsta, enable);
+ if (err) {
+ IWL_ERR(mvm, "Failed to %s Tx protection\n",
+ enable ? "enable" : "disable");
+ } else {
+ IWL_DEBUG_TEMP(mvm, "%s Tx protection\n",
+ enable ? "Enable" : "Disable");
+ mvmsta->tt_tx_protection = enable;
+ }
+ }
+}
+
+static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_THERMAL_MNG_BACKOFF,
+ .len = { sizeof(u32), },
+ .data = { &backoff, },
+ .flags = CMD_SYNC,
+ };
+
+ if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
+ IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
+ backoff);
+ mvm->thermal_throttle.tx_backoff = backoff;
+ } else {
+ IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n");
+ }
+}
+
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
+{
+ const struct iwl_tt_params *params = mvm->thermal_throttle.params;
+ struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+ s32 temperature = mvm->temperature;
+ bool throttle_enable = false;
+ int i;
+ u32 tx_backoff;
+
+ IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature);
+
+ if (params->support_ct_kill && temperature >= params->ct_kill_entry) {
+ iwl_mvm_enter_ctkill(mvm);
+ return;
+ }
+
+ if (params->support_dynamic_smps) {
+ if (!tt->dynamic_smps &&
+ temperature >= params->dynamic_smps_entry) {
+ IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n");
+ tt->dynamic_smps = true;
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tt_smps_iterator, mvm);
+ throttle_enable = true;
+ } else if (tt->dynamic_smps &&
+ temperature <= params->dynamic_smps_exit) {
+ IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
+ tt->dynamic_smps = false;
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tt_smps_iterator, mvm);
+ }
+ }
+
+ if (params->support_tx_protection) {
+ if (temperature >= params->tx_protection_entry) {
+ iwl_mvm_tt_tx_protection(mvm, true);
+ throttle_enable = true;
+ } else if (temperature <= params->tx_protection_exit) {
+ iwl_mvm_tt_tx_protection(mvm, false);
+ }
+ }
+
+ if (params->support_tx_backoff) {
+ tx_backoff = 0;
+ for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
+ if (temperature < params->tx_backoff[i].temperature)
+ break;
+ tx_backoff = params->tx_backoff[i].backoff;
+ }
+ if (tx_backoff != 0)
+ throttle_enable = true;
+ if (tt->tx_backoff != tx_backoff)
+ iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
+ }
+
+ if (!tt->throttle && throttle_enable) {
+ IWL_WARN(mvm,
+ "Due to high temperature thermal throttling initiated\n");
+ tt->throttle = true;
+ } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
+ temperature <= params->tx_protection_exit) {
+ IWL_WARN(mvm,
+ "Temperature is back to normal thermal throttling stopped\n");
+ tt->throttle = false;
+ }
+}
+
+static const struct iwl_tt_params iwl7000_tt_params = {
+ .ct_kill_entry = 118,
+ .ct_kill_exit = 96,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 114,
+ .dynamic_smps_exit = 110,
+ .tx_protection_entry = 114,
+ .tx_protection_exit = 108,
+ .tx_backoff = {
+ {.temperature = 112, .backoff = 200},
+ {.temperature = 113, .backoff = 600},
+ {.temperature = 114, .backoff = 1200},
+ {.temperature = 115, .backoff = 2000},
+ {.temperature = 116, .backoff = 4000},
+ {.temperature = 117, .backoff = 10000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
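
As a worked example of the tx_backoff walk in iwl_mvm_tt_handler() above: the loop keeps the backoff of the last entry whose temperature threshold has been reached. A minimal userspace sketch with sample thresholds copied from iwl7000_tt_params (not part of the patch):

#include <stdio.h>

struct backoff_entry {
	int temperature;
	unsigned int backoff;
};

static unsigned int lookup_backoff(const struct backoff_entry *tbl, int n,
				   int temp)
{
	unsigned int backoff = 0;
	int i;

	/* keep the backoff of the last entry whose threshold was reached */
	for (i = 0; i < n; i++) {
		if (temp < tbl[i].temperature)
			break;
		backoff = tbl[i].backoff;
	}
	return backoff;
}

int main(void)
{
	static const struct backoff_entry tbl[] = {
		{112, 200}, {113, 600}, {114, 1200},
		{115, 2000}, {116, 4000}, {117, 10000},
	};

	/* expected output: 0 (below the table), 1200, 10000 */
	printf("%u %u %u\n",
	       lookup_backoff(tbl, 6, 110),
	       lookup_backoff(tbl, 6, 114),
	       lookup_backoff(tbl, 6, 120));
	return 0;
}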
+
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+
+ IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
+ tt->params = &iwl7000_tt_params;
+ tt->throttle = false;
+ INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
+}
+
+void iwl_mvm_tt_exit(struct iwl_mvm *mvm)
+{
+ cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
+ IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 48c1891e3df6..f0e96a927407 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -175,7 +175,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
* table is controlled by LINK_QUALITY commands
*/
- if (ieee80211_is_data(fc)) {
+ if (ieee80211_is_data(fc) && sta) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;
@@ -408,7 +408,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, seq_number);
- /* NOTE: aggregation will need changes here (for txq id) */
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
@@ -610,8 +609,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
!(info->flags & IEEE80211_TX_STAT_ACK))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
- /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
- if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
+ /* W/A FW bug: seq_ctl is wrong when the status isn't success */
+ if (status != TX_STATUS_SUCCESS) {
struct ieee80211_hdr *hdr = (void *)skb->data;
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 687b34e387ac..1e1332839e4a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -76,6 +76,11 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
int ret;
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+ if (WARN_ON(mvm->d3_test_active))
+ return -EIO;
+#endif
+
/*
* Synchronous commands from this op-mode must hold
* the mutex, this ensures we don't try to send two
@@ -125,6 +130,11 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
lockdep_assert_held(&mvm->mutex);
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+ if (WARN_ON(mvm->d3_test_active))
+ return -EIO;
+#endif
+
/*
* Only synchronous commands can wait for status,
* we use WANT_SKB so the caller can't.
@@ -471,3 +481,34 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
return iwl_mvm_send_cmd(mvm, &cmd);
}
+
+/**
+ * iwl_mvm_update_smps - Get a request to change the SMPS mode
+ * @req_type: The part of the driver that requested the change.
+ * @smps_request: The requested SMPS mode.
+ *
+ * Get a request to change the SMPS mode,
+ * and change it according to all other requests in the driver.
+ */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_smps_type_request req_type,
+ enum ieee80211_smps_mode smps_request)
+{
+ struct iwl_mvm_vif *mvmvif;
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ mvmvif->smps_requests[req_type] = smps_request;
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+ if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
+ smps_mode = IEEE80211_SMPS_STATIC;
+ break;
+ }
+ if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ }
+
+ ieee80211_request_smps(vif, smps_mode);
+}
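
The aggregation rule above is simply: any STATIC request wins, otherwise any DYNAMIC request yields DYNAMIC, otherwise the mode stays AUTOMATIC. A minimal sketch of the same rule, using illustrative enum values rather than the mac80211 definitions:

enum smps_mode { SMPS_AUTOMATIC, SMPS_DYNAMIC, SMPS_STATIC };

static enum smps_mode aggregate_smps(const enum smps_mode *req, int n)
{
	enum smps_mode mode = SMPS_AUTOMATIC;
	int i;

	for (i = 0; i < n; i++) {
		if (req[i] == SMPS_STATIC)
			return SMPS_STATIC;	/* strongest request wins */
		if (req[i] == SMPS_DYNAMIC)
			mode = SMPS_DYNAMIC;
	}
	return mode;
}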
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 8cb53ec2b77b..ff13458efc27 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -78,6 +78,7 @@
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+#if IS_ENABLED(CONFIG_IWLDVM)
{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -129,6 +130,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
@@ -253,13 +255,60 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+#endif /* CONFIG_IWLDVM */
+#if IS_ENABLED(CONFIG_IWLMVM)
/* 7000 Series */
{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
+
+/* 3160 Series */
+ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+#endif /* CONFIG_IWLMVM */
{0}
};
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 148843e7f34f..b654dcdd048a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -217,6 +217,7 @@ struct iwl_pcie_txq_scratch_buf {
* @trans_pcie: pointer back to transport (for timer)
* @need_update: indicates need to update read/write index
* @active: stores if queue is active
+ * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
@@ -232,6 +233,7 @@ struct iwl_txq {
struct iwl_trans_pcie *trans_pcie;
u8 need_update;
u8 active;
+ bool ampdu;
};
static inline dma_addr_t
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 567e67ad1f61..f600e68a410a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -110,9 +110,10 @@
/*
* iwl_rxq_space - Return number of free slots available in queue.
*/
-static int iwl_rxq_space(const struct iwl_rxq *q)
+static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
- int s = q->read - q->write;
+ int s = rxq->read - rxq->write;
+
if (s <= 0)
s += RX_QUEUE_SIZE;
/* keep some buffer to not confuse full and empty queue */
@@ -143,21 +144,22 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
{
unsigned long flags;
u32 reg;
- spin_lock_irqsave(&q->lock, flags);
+ spin_lock_irqsave(&rxq->lock, flags);
- if (q->need_update == 0)
+ if (rxq->need_update == 0)
goto exit_unlock;
if (trans->cfg->base_params->shadow_reg_enable) {
/* shadow register enabled */
/* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
+ rxq->write_actual = (rxq->write & ~0x7);
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
} else {
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -175,22 +177,22 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
goto exit_unlock;
}
- q->write_actual = (q->write & ~0x7);
+ rxq->write_actual = (rxq->write & ~0x7);
iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- q->write_actual);
+ rxq->write_actual);
/* Else device is assumed to be awake */
} else {
/* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
+ rxq->write_actual = (rxq->write & ~0x7);
iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- q->write_actual);
+ rxq->write_actual);
}
}
- q->need_update = 0;
+ rxq->need_update = 0;
exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
+ spin_unlock_irqrestore(&rxq->lock, flags);
}
/*
@@ -355,19 +357,16 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
struct iwl_rxq *rxq = &trans_pcie->rxq;
int i;
- /* Fill the rx_used queue with _all_ of the Rx buffers */
+ lockdep_assert_held(&rxq->lock);
+
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
- __free_pages(rxq->pool[i].page,
- trans_pcie->rx_page_order);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ if (!rxq->pool[i].page)
+ continue;
+ dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
+ rxq->pool[i].page = NULL;
}
}
@@ -491,6 +490,20 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
+static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+{
+ int i;
+
+ lockdep_assert_held(&rxq->lock);
+
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ rxq->free_count = 0;
+
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ list_add(&rxq->pool[i].list, &rxq->rx_used);
+}
+
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -505,13 +518,12 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
}
spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- INIT_WORK(&trans_pcie->rx_replenish,
- iwl_pcie_rx_replenish_work);
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+ /* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
+ iwl_pcie_rx_init_rxb_lists(rxq);
for (i = 0; i < RX_QUEUE_SIZE; i++)
rxq->queue[i] = NULL;
@@ -520,7 +532,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
- rxq->free_count = 0;
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock_irqrestore(&rxq->lock, flags);
@@ -802,9 +813,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
u32 handled = 0;
unsigned long flags;
u32 i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_mask;
-#endif
lock_map_acquire(&trans->sync_cmd_lockdep_map);
@@ -826,14 +834,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
inta = trans_pcie->inta;
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_have_debug_level(IWL_DL_ISR)) {
- /* just for debug */
- inta_mask = iwl_read32(trans, CSR_INT_MASK);
+ if (iwl_have_debug_level(IWL_DL_ISR))
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
- inta, inta_mask);
- }
-#endif
+ inta, iwl_read32(trans, CSR_INT_MASK));
/* saved interrupt in inta variable now we can reset trans_pcie->inta */
trans_pcie->inta = 0;
@@ -855,12 +858,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
goto out;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(IWL_DL_ISR)) {
/* NIC fires this, but we don't use it, redundant with WAKEUP */
if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
- "the frame/frames.\n");
+ IWL_DEBUG_ISR(trans,
+ "Scheduler finished to transmit the frame/frames.\n");
isr_stats->sch++;
}
@@ -870,7 +872,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->alive++;
}
}
-#endif
+
/* Safely ignore these bits for debug checks below */
inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
@@ -886,6 +888,14 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill) {
+ /*
+ * Clear the interrupt in APMG if the NIC is going down.
+ * Note that when the NIC exits RFkill (else branch), we
+ * can't access prph and the NIC will be reset in
+ * start_hw anyway.
+ */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+ APMG_RTC_INT_STT_RFKILL);
set_bit(STATUS_RFKILL, &trans_pcie->status);
if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
&trans_pcie->status))
@@ -1118,9 +1128,6 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
struct iwl_trans *trans = data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta, inta_mask;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_fh;
-#endif
lockdep_assert_held(&trans_pcie->irq_lock);
@@ -1159,13 +1166,11 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_have_debug_level(IWL_DL_ISR)) {
- inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
- "fh 0x%08x\n", inta, inta_mask, inta_fh);
- }
-#endif
+ if (iwl_have_debug_level(IWL_DL_ISR))
+ IWL_DEBUG_ISR(trans,
+ "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+ inta, inta_mask,
+ iwl_read32(trans, CSR_FH_INT_STATUS));
trans_pcie->inta |= inta;
/* the thread will service interrupts and re-enable them */
@@ -1198,7 +1203,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
struct iwl_trans *trans = data;
struct iwl_trans_pcie *trans_pcie;
- u32 inta, inta_mask;
+ u32 inta;
u32 val = 0;
u32 read;
unsigned long flags;
@@ -1226,7 +1231,6 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
* If we have something to service, the tasklet will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here.
*/
- inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
/* Ignore interrupt if there's nothing in NIC to service.
@@ -1271,8 +1275,11 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
val |= 0x8000;
inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
+ IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
+ inta, trans_pcie->inta_mask, val);
+ if (iwl_have_debug_level(IWL_DL_ISR))
+ IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
+ iwl_read32(trans, CSR_INT_MASK));
inta &= trans_pcie->inta_mask;
trans_pcie->inta |= inta;
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 50ba0a468f94..96cfcdd39079 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -405,20 +405,27 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
{
u8 *v_addr;
dma_addr_t p_addr;
- u32 offset;
+ u32 offset, chunk_sz = section->len;
int ret = 0;
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
section_num);
- v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
- if (!v_addr)
- return -ENOMEM;
+ v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!v_addr) {
+ IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
+ chunk_sz = PAGE_SIZE;
+ v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
+ &p_addr, GFP_KERNEL);
+ if (!v_addr)
+ return -ENOMEM;
+ }
- for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
+ for (offset = 0; offset < section->len; offset += chunk_sz) {
u32 copy_size;
- copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
+ copy_size = min_t(u32, chunk_sz, section->len - offset);
memcpy(v_addr, (u8 *)section->data + offset, copy_size);
ret = iwl_pcie_load_firmware_chunk(trans,
@@ -432,7 +439,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
}
}
- dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
+ dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
return ret;
}
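
The change above first tries to allocate one coherent buffer the size of the whole section and only falls back to PAGE_SIZE chunks when that fails, so the copy loop works with either chunk size. A hedged userspace sketch of the same fallback-and-copy pattern; malloc() and consume() stand in for dma_alloc_coherent() and the firmware-chunk upload, purely for illustration:

#include <stdlib.h>
#include <string.h>

#define CHUNK_FALLBACK 4096	/* stands in for PAGE_SIZE */

static int copy_in_chunks(const unsigned char *src, size_t len,
			  void (*consume)(const void *, size_t))
{
	size_t chunk_sz = len, offset;
	unsigned char *buf = malloc(chunk_sz);	/* try one big buffer */

	if (!buf) {
		chunk_sz = CHUNK_FALLBACK;	/* fall back to small chunks */
		buf = malloc(chunk_sz);
		if (!buf)
			return -1;
	}

	for (offset = 0; offset < len; offset += chunk_sz) {
		size_t copy_size = len - offset < chunk_sz ?
				   len - offset : chunk_sz;

		memcpy(buf, src + offset, copy_size);
		consume(buf, copy_size);	/* e.g. hand the chunk to HW */
	}

	free(buf);
	return 0;
}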
@@ -571,13 +578,17 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
clear_bit(STATUS_RFKILL, &trans_pcie->status);
}
-static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans)
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
- /* let the ucode operate on its own */
- iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
iwl_disable_interrupts(trans);
+
+ /*
+ * in testing mode, the host stays awake and the
+ * hardware won't be reset (not even partially)
+ */
+ if (test)
+ return;
+
iwl_pcie_disable_ict(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -596,11 +607,18 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans)
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status)
+ enum iwl_d3_status *status,
+ bool test)
{
u32 val;
int ret;
+ if (test) {
+ iwl_enable_interrupts(trans);
+ *status = IWL_D3_STATUS_ALIVE;
+ return 0;
+ }
+
iwl_pcie_set_pwr(trans, false);
val = iwl_read32(trans, CSR_RESET);
@@ -636,9 +654,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
*status = IWL_D3_STATUS_ALIVE;
return 0;
}
@@ -655,6 +670,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return err;
}
+ /* Reset the entire device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ usleep_range(10, 15);
+
iwl_pcie_apm_init(trans);
/* From now on, the op_mode will be kept updated about RF kill state */
@@ -823,8 +843,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
unsigned long *flags)
{
int ret;
- struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
- spin_lock_irqsave(&pcie_trans->reg_lock, *flags);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
@@ -860,7 +881,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
WARN_ONCE(1,
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
val);
- spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
return false;
}
}
@@ -869,22 +890,22 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
* Fool sparse by faking we release the lock - sparse will
* track nic_access anyway.
*/
- __release(&pcie_trans->reg_lock);
+ __release(&trans_pcie->reg_lock);
return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
unsigned long *flags)
{
- struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- lockdep_assert_held(&pcie_trans->reg_lock);
+ lockdep_assert_held(&trans_pcie->reg_lock);
/*
* Fool sparse by faking we acquiring the lock - sparse will
* track nic_access anyway.
*/
- __acquire(&pcie_trans->reg_lock);
+ __acquire(&trans_pcie->reg_lock);
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -895,7 +916,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
* scheduled on different CPUs (after we drop reg_lock).
*/
mmiowb();
- spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
@@ -917,11 +938,11 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
+ const void *buf, int dwords)
{
unsigned long flags;
int offs, ret = 0;
- u32 *vals = buf;
+ const u32 *vals = buf;
if (iwl_trans_grab_nic_access(trans, false, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index c5e30294c5ac..c47c92165aba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -224,13 +224,13 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
switch (sec_ctl & TX_CMD_SEC_MSK) {
case TX_CMD_SEC_CCM:
- len += CCMP_MIC_LEN;
+ len += IEEE80211_CCMP_MIC_LEN;
break;
case TX_CMD_SEC_TKIP:
- len += TKIP_ICV_LEN;
+ len += IEEE80211_TKIP_ICV_LEN;
break;
case TX_CMD_SEC_WEP:
- len += WEP_IV_LEN + WEP_ICV_LEN;
+ len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
break;
}
@@ -576,10 +576,16 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, q->read_ptr);
iwl_pcie_txq_free_tfd(trans, txq);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
+ txq->active = false;
spin_unlock_bh(&txq->lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_wake_queue(trans, txq);
}
/*
@@ -927,6 +933,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
spin_lock_bh(&txq->lock);
+ if (!txq->active) {
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+ txq_id, ssn);
+ goto out;
+ }
+
if (txq->q.read_ptr == tfd_num)
goto out;
@@ -1045,6 +1057,10 @@ static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
+/* Receiver address (actually, Rx station's index into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
+
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn)
{
@@ -1069,6 +1085,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
/* enable aggregations for the queue */
iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ trans_pcie->txq[txq_id].ampdu = true;
} else {
/*
* disable aggregations for the queue, this will also make the
@@ -1103,6 +1120,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
(fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
SCD_QUEUE_STTS_REG_MSK);
+ trans_pcie->txq[txq_id].active = true;
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
txq_id, fifo, ssn & 0xff);
}
@@ -1125,6 +1143,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
ARRAY_SIZE(zero_val));
iwl_pcie_txq_unmap(trans, txq_id);
+ trans_pcie->txq[txq_id].ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -1518,11 +1537,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
IWL_ERR(trans, "FW error in SYNC CMD %s\n",
get_cmd_string(trans_pcie, cmd->id));
+ dump_stack();
ret = -EIO;
goto cancel;
}
- if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans_pcie->status)) {
IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
ret = -ERFKILL;
goto cancel;
@@ -1564,7 +1585,8 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
return -EIO;
- if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans_pcie->status)) {
IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
cmd->id);
return -ERFKILL;
@@ -1592,7 +1614,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
u8 wait_write_ptr = 0;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
- u16 __maybe_unused wifi_seq;
+ u16 wifi_seq;
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
@@ -1609,13 +1631,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* the BA.
* Check here that the packets are in the right place on the ring.
*/
-#ifdef CONFIG_IWLWIFI_DEBUG
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
- ((wifi_seq & 0xff) != q->write_ptr),
+ WARN_ONCE(trans_pcie->txq[txq_id].ampdu &&
+ (wifi_seq & 0xff) != q->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
txq_id, wifi_seq, q->write_ptr);
-#endif
/* Set up driver data for this TFD */
txq->entries[q->write_ptr].skb = skb;
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 3e81264db81e..efae07e05c80 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -240,7 +240,7 @@ static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
memset(&mesh_access, 0, sizeof(mesh_access));
mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET);
- if (!strict_strtoul(buf, 10, &retry_limit))
+ if (!kstrtoul(buf, 10, &retry_limit))
return -ENOTSUPP;
if (retry_limit > 15)
return -ENOTSUPP;
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
new file mode 100644
index 000000000000..8d683070bdb3
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -0,0 +1,101 @@
+/*
+ * Marvell Wireless LAN device driver: 802.11h
+ *
+ * Copyright (C) 2013, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+#include "fw.h"
+
+
+/* This function appends 11h info to a buffer while joining an
+ * infrastructure BSS
+ */
+static void
+mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer,
+ struct mwifiex_bssdescriptor *bss_desc)
+{
+ struct mwifiex_ie_types_header *ie_header;
+ struct mwifiex_ie_types_pwr_capability *cap;
+ struct mwifiex_ie_types_local_pwr_constraint *constraint;
+ struct ieee80211_supported_band *sband;
+ u8 radio_type;
+ int i;
+
+ if (!buffer || !(*buffer))
+ return;
+
+ radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
+ sband = priv->wdev->wiphy->bands[radio_type];
+
+ cap = (struct mwifiex_ie_types_pwr_capability *)*buffer;
+ cap->header.type = cpu_to_le16(WLAN_EID_PWR_CAPABILITY);
+ cap->header.len = cpu_to_le16(2);
+ cap->min_pwr = 0;
+ cap->max_pwr = 0;
+ *buffer += sizeof(*cap);
+
+ constraint = (struct mwifiex_ie_types_local_pwr_constraint *)*buffer;
+ constraint->header.type = cpu_to_le16(WLAN_EID_PWR_CONSTRAINT);
+ constraint->header.len = cpu_to_le16(2);
+ constraint->chan = bss_desc->channel;
+ constraint->constraint = bss_desc->local_constraint;
+ *buffer += sizeof(*constraint);
+
+ ie_header = (struct mwifiex_ie_types_header *)*buffer;
+ ie_header->type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
+ ie_header->len = cpu_to_le16(2 * sband->n_channels + 2);
+ *buffer += sizeof(*ie_header);
+ *(*buffer)++ = WLAN_EID_SUPPORTED_CHANNELS;
+ *(*buffer)++ = 2 * sband->n_channels;
+ for (i = 0; i < sband->n_channels; i++) {
+ *(*buffer)++ = ieee80211_frequency_to_channel(
+ sband->channels[i].center_freq);
+ *(*buffer)++ = 1; /* one channel in the subband */
+ }
+}
+
+/* Enable or disable the 11h extensions in the firmware */
+static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
+{
+ u32 enable = flag;
+
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, DOT11H_I, &enable);
+}
+
+/* This function processes the TLV buffer for a pending BSS Join command.
+ *
+ * Activate 11h functionality in the firmware if the spectrum management
+ * capability bit is found in the network we are joining. Also, necessary
+ * TLVs are set based on the requested network's 11h capability.
+ */
+void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
+ struct mwifiex_bssdescriptor *bss_desc)
+{
+ if (bss_desc->sensed_11h) {
+ /* Activate 11h functions in firmware, turns on capability
+ * bit
+ */
+ mwifiex_11h_activate(priv, true);
+ bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_SPECTRUM_MGMT;
+ mwifiex_11h_process_infra_join(priv, buffer, bss_desc);
+ } else {
+ /* Deactivate 11h functions in the firmware */
+ mwifiex_11h_activate(priv, false);
+ bss_desc->cap_info_bitmap &= ~WLAN_CAPABILITY_SPECTRUM_MGMT;
+ }
+}
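
For reference, the Supported Channels element appended by mwifiex_11h_process_infra_join() above is a flat sequence of (first_channel, num_channels) pairs, one single-channel sub-band per hardware channel. An illustrative encoding for a hypothetical band containing channels 36, 40 and 44 (element ID 36 is WLAN_EID_SUPPORTED_CHANNELS; the values below are an example, not driver data):

static const unsigned char supp_chan_ie_example[] = {
	36,	/* WLAN_EID_SUPPORTED_CHANNELS */
	6,	/* 2 * n_channels */
	36, 1,	/* first channel 36, one channel in the sub-band */
	40, 1,	/* channel 40 */
	44, 1,	/* channel 44 */
};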
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 4f614aad9ded..f7ff4725506a 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -3,13 +3,13 @@ config MWIFIEX
depends on CFG80211
---help---
This adds support for wireless adapters based on Marvell
- 802.11n chipsets.
+ 802.11n/ac chipsets.
If you choose to build it as a module, it will be called
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797"
+ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8897"
depends on MWIFIEX && MMC
select FW_LOADER
---help---
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index ecf28464367f..a42a506fd32b 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -40,6 +40,7 @@ mwifiex-y += sta_rx.o
mwifiex-y += uap_txrx.o
mwifiex-y += cfg80211.o
mwifiex-y += ethtool.o
+mwifiex-y += 11h.o
mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e42b266a023a..89459db4c53b 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -20,6 +20,9 @@
#include "cfg80211.h"
#include "main.h"
+static char *reg_alpha2;
+module_param(reg_alpha2, charp, 0);
+
static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
{
.max = 2, .types = BIT(NL80211_IFTYPE_STATION),
@@ -1231,6 +1234,51 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
return 0;
}
+/* cfg80211 operation handler for del_station.
+ * Function deauthenticates the station specified by the mac parameter.
+ * If mac is NULL/broadcast, all stations in associated station list are
+ * deauthenticated. If bss is not started or there are no stations in
+ * associated stations list, no action is taken.
+ */
+static int
+mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_sta_node *sta_node;
+ unsigned long flags;
+
+ if (list_empty(&priv->sta_list) || !priv->bss_started)
+ return 0;
+
+ if (!mac || is_broadcast_ether_addr(mac)) {
+ wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__);
+ list_for_each_entry(sta_node, &priv->sta_list, list) {
+ if (mwifiex_send_cmd_sync(priv,
+ HostCmd_CMD_UAP_STA_DEAUTH,
+ HostCmd_ACT_GEN_SET, 0,
+ sta_node->mac_addr))
+ return -1;
+ mwifiex_uap_del_sta_data(priv, sta_node);
+ }
+ } else {
+ wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, mac);
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_node = mwifiex_get_sta_entry(priv, mac);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ if (sta_node) {
+ if (mwifiex_send_cmd_sync(priv,
+ HostCmd_CMD_UAP_STA_DEAUTH,
+ HostCmd_ACT_GEN_SET, 0,
+ sta_node->mac_addr))
+ return -1;
+ mwifiex_uap_del_sta_data(priv, sta_node);
+ }
+ }
+
+ return 0;
+}
+
static int
mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
{
@@ -1668,9 +1716,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int ret;
- if (priv->bss_mode != NL80211_IFTYPE_STATION) {
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
wiphy_err(wiphy,
- "%s: reject infra assoc request in non-STA mode\n",
+ "%s: reject infra assoc request in non-STA role\n",
dev->name);
return -EINVAL;
}
@@ -1859,6 +1907,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
int i, offset, ret;
struct ieee80211_channel *chan;
struct ieee_types_header *ie;
+ struct mwifiex_user_scan_cfg *user_scan_cfg;
wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
@@ -1869,20 +1918,22 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
- if (priv->user_scan_cfg) {
+ /* Block scan request if scan operation or scan cleanup when interface
+ * is disabled is in process
+ */
+ if (priv->scan_request || priv->scan_aborting) {
dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
return -EBUSY;
}
- priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
- GFP_KERNEL);
- if (!priv->user_scan_cfg)
+ user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
+ if (!user_scan_cfg)
return -ENOMEM;
priv->scan_request = request;
- priv->user_scan_cfg->num_ssids = request->n_ssids;
- priv->user_scan_cfg->ssid_list = request->ssids;
+ user_scan_cfg->num_ssids = request->n_ssids;
+ user_scan_cfg->ssid_list = request->ssids;
if (request->ie && request->ie_len) {
offset = 0;
@@ -1902,25 +1953,25 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
for (i = 0; i < min_t(u32, request->n_channels,
MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
chan = request->channels[i];
- priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
- priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
+ user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
+ user_scan_cfg->chan_list[i].radio_type = chan->band;
if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- priv->user_scan_cfg->chan_list[i].scan_type =
+ user_scan_cfg->chan_list[i].scan_type =
MWIFIEX_SCAN_TYPE_PASSIVE;
else
- priv->user_scan_cfg->chan_list[i].scan_type =
+ user_scan_cfg->chan_list[i].scan_type =
MWIFIEX_SCAN_TYPE_ACTIVE;
- priv->user_scan_cfg->chan_list[i].scan_time = 0;
+ user_scan_cfg->chan_list[i].scan_time = 0;
}
- ret = mwifiex_scan_networks(priv, priv->user_scan_cfg);
+ ret = mwifiex_scan_networks(priv, user_scan_cfg);
+ kfree(user_scan_cfg);
if (ret) {
dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+ priv->scan_aborting = false;
priv->scan_request = NULL;
- kfree(priv->user_scan_cfg);
- priv->user_scan_cfg = NULL;
return ret;
}
@@ -2419,6 +2470,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.change_beacon = mwifiex_cfg80211_change_beacon,
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
.set_antenna = mwifiex_cfg80211_set_antenna,
+ .del_station = mwifiex_cfg80211_del_station,
#ifdef CONFIG_PM
.suspend = mwifiex_cfg80211_suspend,
.resume = mwifiex_cfg80211_resume,
@@ -2426,6 +2478,27 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
#endif
};
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT,
+ .n_patterns = MWIFIEX_MAX_FILTERS,
+ .pattern_min_len = 1,
+ .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
+ .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
+};
+#endif
+
+static bool mwifiex_is_valid_alpha2(const char *alpha2)
+{
+ if (!alpha2 || strlen(alpha2) != 2)
+ return false;
+
+ if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
+ return true;
+
+ return false;
+}
+
/*
* This function registers the device with CFG802.11 subsystem.
*
@@ -2478,16 +2551,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_STRICT_REGULATORY |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
#ifdef CONFIG_PM
- wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT;
- wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS;
- wiphy->wowlan.pattern_min_len = 1;
- wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN;
- wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN;
+ wiphy->wowlan = &mwifiex_wowlan_support;
#endif
wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
@@ -2519,10 +2589,16 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy_free(wiphy);
return ret;
}
- country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
- if (country_code)
- dev_info(adapter->dev,
- "ignoring F/W country code %2.2s\n", country_code);
+
+ if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
+ wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2);
+ regulatory_hint(wiphy, reg_alpha2);
+ } else {
+ country_code = mwifiex_11d_code_2_region(adapter->region_code);
+ if (country_code)
+ wiphy_info(wiphy, "ignoring F/W country code %2.2s\n",
+ country_code);
+ }
adapter->wiphy = wiphy;
return ret;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 988552dece75..5178c4630d89 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -415,7 +415,8 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
u32 k = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
switch (adapter->config_bands) {
case BAND_B:
dev_dbg(adapter->dev, "info: infra band=%d "
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 26755d9acb55..2d761477d15e 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -570,6 +570,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_UAP_SYS_CONFIG:
case HostCmd_CMD_UAP_BSS_START:
case HostCmd_CMD_UAP_BSS_STOP:
+ case HostCmd_CMD_UAP_STA_DEAUTH:
ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action,
cmd_oid, data_buf,
cmd_ptr);
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 1f7578d553ec..1b45aa533300 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -245,6 +245,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HT_BW_20 0
#define HT_BW_40 1
+#define DFS_CHAN_MOVE_TIME 10000
+
#define HostCmd_CMD_GET_HW_SPEC 0x0003
#define HostCmd_CMD_802_11_SCAN 0x0006
#define HostCmd_CMD_802_11_GET_LOG 0x000b
@@ -271,6 +273,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075
#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
+#define HostCmd_CMD_CFG_DATA 0x008f
#define HostCmd_CMD_VERSION_EXT 0x0097
#define HostCmd_CMD_MEF_CFG 0x009a
#define HostCmd_CMD_RSSI_INFO 0x00a4
@@ -279,6 +282,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_UAP_SYS_CONFIG 0x00b0
#define HostCmd_CMD_UAP_BSS_START 0x00b1
#define HostCmd_CMD_UAP_BSS_STOP 0x00b2
+#define HostCmd_CMD_UAP_STA_DEAUTH 0x00b5
#define HostCmd_CMD_11N_CFG 0x00cd
#define HostCmd_CMD_11N_ADDBA_REQ 0x00ce
#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf
@@ -436,6 +440,7 @@ enum P2P_MODES {
#define EVENT_BW_CHANGE 0x00000048
#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
+#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_ID_MASK 0xffff
@@ -464,6 +469,8 @@ enum P2P_MODES {
#define MWIFIEX_CRITERIA_UNICAST BIT(1)
#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
+#define CFG_DATA_TYPE_CAL 2
+
struct mwifiex_ie_types_header {
__le16 type;
__le16 len;
@@ -971,6 +978,7 @@ enum SNMP_MIB_INDEX {
LONG_RETRY_LIM_I = 7,
FRAG_THRESH_I = 8,
DOT11D_I = 9,
+ DOT11H_I = 10,
};
#define MAX_SNMP_BUF_SIZE 128
@@ -1197,6 +1205,23 @@ struct host_cmd_ds_amsdu_aggr_ctrl {
__le16 curr_buf_size;
} __packed;
+struct host_cmd_ds_sta_deauth {
+ u8 mac[ETH_ALEN];
+ __le16 reason;
+} __packed;
+
+struct mwifiex_ie_types_pwr_capability {
+ struct mwifiex_ie_types_header header;
+ s8 min_pwr;
+ s8 max_pwr;
+};
+
+struct mwifiex_ie_types_local_pwr_constraint {
+ struct mwifiex_ie_types_header header;
+ u8 chan;
+ u8 constraint;
+};
+
struct mwifiex_ie_types_wmm_param_set {
struct mwifiex_ie_types_header header;
u8 wmm_ie[1];
@@ -1573,6 +1598,12 @@ struct mwifiex_ie_list {
struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX];
} __packed;
+struct host_cmd_ds_802_11_cfg_data {
+ __le16 action;
+ __le16 type;
+ __le16 data_len;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -1630,7 +1661,9 @@ struct host_cmd_ds_command {
struct host_cmd_ds_802_11_eeprom_access eeprom;
struct host_cmd_ds_802_11_subsc_evt subsc_evt;
struct host_cmd_ds_sys_config uap_sys_config;
+ struct host_cmd_ds_sta_deauth sta_deauth;
struct host_cmd_11ac_vht_cfg vht_cfg;
+ struct host_cmd_ds_802_11_cfg_data cfg_data;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 9f44fda19db9..2cf8b964e966 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -52,87 +52,6 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
return 0;
}
-static void scan_delay_timer_fn(unsigned long data)
-{
- struct mwifiex_private *priv = (struct mwifiex_private *)data;
- struct mwifiex_adapter *adapter = priv->adapter;
- struct cmd_ctrl_node *cmd_node, *tmp_node;
- unsigned long flags;
-
- if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
- /*
- * Abort scan operation by cancelling all pending scan
- * commands
- */
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q, list) {
- list_del(&cmd_node->list);
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->scan_processing = false;
- adapter->scan_delay_cnt = 0;
- adapter->empty_tx_q_cnt = 0;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
-
- if (priv->user_scan_cfg) {
- if (priv->scan_request) {
- dev_dbg(priv->adapter->dev,
- "info: aborting scan\n");
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
- } else {
- dev_dbg(priv->adapter->dev,
- "info: scan already aborted\n");
- }
-
- kfree(priv->user_scan_cfg);
- priv->user_scan_cfg = NULL;
- }
- goto done;
- }
-
- if (!atomic_read(&priv->adapter->is_tx_received)) {
- adapter->empty_tx_q_cnt++;
- if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
- /*
- * No Tx traffic for 200msec. Get scan command from
- * scan pending queue and put to cmd pending queue to
- * resume scan operation
- */
- adapter->scan_delay_cnt = 0;
- adapter->empty_tx_q_cnt = 0;
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- cmd_node = list_first_entry(&adapter->scan_pending_q,
- struct cmd_ctrl_node, list);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
-
- mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
- true);
- queue_work(adapter->workqueue, &adapter->main_work);
- goto done;
- }
- } else {
- adapter->empty_tx_q_cnt = 0;
- }
-
- /* Delay scan operation further by 20msec */
- mod_timer(&priv->scan_delay_timer, jiffies +
- msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
- adapter->scan_delay_cnt++;
-
-done:
- if (atomic_read(&priv->adapter->is_tx_received))
- atomic_set(&priv->adapter->is_tx_received, false);
-
- return;
-}
-
/*
* This function initializes the private structure and sets default
* values to the members.
@@ -214,8 +133,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->scan_block = false;
- setup_timer(&priv->scan_delay_timer, scan_delay_timer_fn,
- (unsigned long)priv);
+ priv->csa_chan = 0;
+ priv->csa_expire_time = 0;
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -447,23 +366,29 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
}
/*
- * This function frees the adapter structure.
+ * This function performs cleanup for adapter structure.
*
- * The freeing operation is done recursively, by canceling all
- * pending commands, freeing the member buffers previously
- * allocated (command buffers, scan table buffer, sleep confirm
- * command buffer), stopping the timers and calling the cleanup
- * routines for every interface, before the actual adapter
- * structure is freed.
+ * The cleanup is done recursively, by canceling all pending
+ * commands, freeing the member buffers previously allocated
+ * (command buffers, scan table buffer, sleep confirm command
+ * buffer), stopping the timers and calling the cleanup routines
+ * for every interface.
*/
static void
-mwifiex_free_adapter(struct mwifiex_adapter *adapter)
+mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
+ int i;
+
if (!adapter) {
pr_err("%s: adapter is NULL\n", __func__);
return;
}
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (adapter->priv[i])
+ del_timer_sync(&adapter->priv[i]->scan_delay_timer);
+ }
+
mwifiex_cancel_all_pending_cmd(adapter);
/* Free lock variables */
@@ -684,7 +609,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
int ret = -EINPROGRESS;
struct mwifiex_private *priv;
s32 i;
- unsigned long flags;
struct sk_buff *skb;
/* mwifiex already shutdown */
@@ -719,7 +643,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
}
}
- spin_lock_irqsave(&adapter->mwifiex_lock, flags);
+ spin_lock(&adapter->mwifiex_lock);
if (adapter->if_ops.data_complete) {
while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) {
@@ -733,10 +657,9 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
}
}
- /* Free adapter structure */
- mwifiex_free_adapter(adapter);
+ mwifiex_adapter_cleanup(adapter);
- spin_unlock_irqrestore(&adapter->mwifiex_lock, flags);
+ spin_unlock(&adapter->mwifiex_lock);
/* Notify completion */
ret = mwifiex_shutdown_fw_complete(adapter);
@@ -770,7 +693,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
if (!ret) {
dev_notice(adapter->dev,
"WLAN FW already running! Skip FW dnld\n");
- goto done;
+ return 0;
}
poll_num = MAX_FIRMWARE_POLL_TRIES;
@@ -796,14 +719,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
poll_fw:
/* Check if the firmware is downloaded successfully or not */
ret = adapter->if_ops.check_fw_status(adapter, poll_num);
- if (ret) {
+ if (ret)
dev_err(adapter->dev, "FW failed to be active in time\n");
- return -1;
- }
-done:
- /* re-enable host interrupt for mwifiex after fw dnld is successful */
- if (adapter->if_ops.enable_int)
- adapter->if_ops.enable_int(adapter);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 6bcb66e6e97c..12e778159ec5 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -534,6 +534,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc);
+ mwifiex_11h_process_join(priv, &pos, bss_desc);
+
cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN);
/* Set the Capability info at last */
@@ -919,9 +921,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
memcpy(&priv->curr_bss_params.data_rates,
&adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
- adhoc_start->data_rate[0], adhoc_start->data_rate[1],
- adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
+ dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n",
+ adhoc_start->data_rate);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
@@ -1290,8 +1291,10 @@ int mwifiex_associate(struct mwifiex_private *priv,
{
u8 current_bssid[ETH_ALEN];
- /* Return error if the adapter or table entry is not marked as infra */
- if ((priv->bss_mode != NL80211_IFTYPE_STATION) ||
+ /* Return error if the adapter is not STA role or table entry
+ * is not marked as infra.
+ */
+ if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) ||
(bss_desc->bss_mode != NL80211_IFTYPE_STATION))
return -1;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 2eb88ea9acf7..1753431de361 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -25,6 +25,86 @@
#define VERSION "1.0"
const char driver_version[] = "mwifiex " VERSION " (%s) ";
+static char *cal_data_cfg;
+module_param(cal_data_cfg, charp, 0);
+
+static void scan_delay_timer_fn(unsigned long data)
+{
+ struct mwifiex_private *priv = (struct mwifiex_private *)data;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct cmd_ctrl_node *cmd_node, *tmp_node;
+ unsigned long flags;
+
+ if (adapter->surprise_removed)
+ return;
+
+ if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+ /*
+ * Abort scan operation by cancelling all pending scan
+ * commands
+ */
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ list_for_each_entry_safe(cmd_node, tmp_node,
+ &adapter->scan_pending_q, list) {
+ list_del(&cmd_node->list);
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ }
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ adapter->scan_processing = false;
+ adapter->scan_delay_cnt = 0;
+ adapter->empty_tx_q_cnt = 0;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ } else {
+ priv->scan_aborting = false;
+ dev_dbg(adapter->dev, "info: scan already aborted\n");
+ }
+ goto done;
+ }
+
+ if (!atomic_read(&priv->adapter->is_tx_received)) {
+ adapter->empty_tx_q_cnt++;
+ if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
+ /*
+ * No Tx traffic for 200 msec. Move the scan command from the
+ * scan pending queue to the cmd pending queue to resume the
+ * scan operation
+ */
+ adapter->scan_delay_cnt = 0;
+ adapter->empty_tx_q_cnt = 0;
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+ true);
+ queue_work(adapter->workqueue, &adapter->main_work);
+ goto done;
+ }
+ } else {
+ adapter->empty_tx_q_cnt = 0;
+ }
+
+ /* Delay scan operation further by 20msec */
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ adapter->scan_delay_cnt++;
+
+done:
+ if (atomic_read(&priv->adapter->is_tx_received))
+ atomic_set(&priv->adapter->is_tx_received, false);
+
+ return;
+}
/*
* This function registers the device and performs all the necessary
@@ -73,6 +153,10 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
adapter->priv[i]->adapter = adapter;
adapter->priv_num++;
+
+ setup_timer(&adapter->priv[i]->scan_delay_timer,
+ scan_delay_timer_fn,
+ (unsigned long)adapter->priv[i]);
}
mwifiex_init_lock_list(adapter);
@@ -336,6 +420,17 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
dev_notice(adapter->dev, "WLAN FW is active\n");
+ if (cal_data_cfg) {
+ if ((request_firmware(&adapter->cal_data, cal_data_cfg,
+ adapter->dev)) < 0)
+ dev_err(adapter->dev,
+ "Cal data request_firmware() failed\n");
+ }
+
+ /* enable host interrupt after fw dnld is successful */
+ if (adapter->if_ops.enable_int)
+ adapter->if_ops.enable_int(adapter);
+
adapter->init_wait_q_woken = false;
ret = mwifiex_init_fw(adapter);
if (ret == -1) {
@@ -387,9 +482,15 @@ err_add_intf:
mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
rtnl_unlock();
err_init_fw:
+ if (adapter->if_ops.disable_int)
+ adapter->if_ops.disable_int(adapter);
pr_debug("info: %s: unregister device\n", __func__);
adapter->if_ops.unregister_dev(adapter);
done:
+ if (adapter->cal_data) {
+ release_firmware(adapter->cal_data);
+ adapter->cal_data = NULL;
+ }
release_firmware(adapter->firmware);
complete(&adapter->fw_load);
return;
@@ -436,6 +537,7 @@ mwifiex_close(struct net_device *dev)
dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
cfg80211_scan_done(priv->scan_request, 1);
priv->scan_request = NULL;
+ priv->scan_aborting = true;
}
return 0;
@@ -573,9 +675,8 @@ static void mwifiex_set_multicast_list(struct net_device *dev)
mcast_list.mode = MWIFIEX_ALL_MULTI_MODE;
} else {
mcast_list.mode = MWIFIEX_MULTICAST_MODE;
- if (netdev_mc_count(dev))
- mcast_list.num_multicast_addr =
- mwifiex_copy_mcast_addr(&mcast_list, dev);
+ mcast_list.num_multicast_addr =
+ mwifiex_copy_mcast_addr(&mcast_list, dev);
}
mwifiex_request_set_multicast_list(priv, &mcast_list);
}
@@ -760,7 +861,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
/* Register the device. Fill up the private data structure with relevant
- information from the card and request for the required IRQ. */
+ information from the card. */
if (adapter->if_ops.register_dev(adapter)) {
pr_err("%s: failed to register mwifiex device\n", __func__);
goto err_registerdev;
@@ -824,6 +925,11 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
if (!adapter)
goto exit_remove;
+ /* We can no longer handle interrupts once we start doing the teardown
+ * below. */
+ if (adapter->if_ops.disable_int)
+ adapter->if_ops.disable_int(adapter);
+
adapter->surprise_removed = true;
/* Stop data */
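For context on the scan_delay_timer_fn added above: while Tx traffic is observed the pending scan is postponed in 20 msec steps, a queued scan segment is resumed once the Tx queue has stayed empty for 200 msec, and the whole scan is aborted after MWIFIEX_MAX_SCAN_DELAY_CNT postponements. The user-space sketch below models only that decision logic; the constants and names are illustrative assumptions, not the driver's definitions.

/* Illustrative model of the scan-delay policy; values are assumed. */
#include <stdio.h>
#include <stdbool.h>

#define MAX_SCAN_DELAY_CNT   50   /* assumed: 50 * 20 ms of postponement */
#define MAX_EMPTY_TX_Q_CNT   10   /* assumed: 10 * 20 ms = 200 ms idle Tx */

enum scan_action { SCAN_DELAY, SCAN_RESUME, SCAN_ABORT };

static enum scan_action scan_delay_tick(unsigned *delay_cnt,
                                        unsigned *empty_cnt,
                                        bool tx_seen)
{
        if (*delay_cnt == MAX_SCAN_DELAY_CNT)
                return SCAN_ABORT;          /* cancel pending scan commands */

        if (!tx_seen && ++*empty_cnt == MAX_EMPTY_TX_Q_CNT) {
                *delay_cnt = *empty_cnt = 0;
                return SCAN_RESUME;         /* requeue the next scan segment */
        }
        if (tx_seen)
                *empty_cnt = 0;             /* traffic seen, restart idle count */

        (*delay_cnt)++;
        return SCAN_DELAY;                  /* re-arm the timer for 20 ms more */
}

int main(void)
{
        unsigned delay = 0, empty = 0;
        /* two busy ticks, then idle ticks until the scan resumes */
        bool traffic[] = { true, true, false, false, false, false, false,
                           false, false, false, false, false };
        for (unsigned i = 0; i < sizeof(traffic) / sizeof(traffic[0]); i++)
                printf("tick %2u -> %d\n", i,
                       scan_delay_tick(&delay, &empty, traffic[i]));
        return 0;
}

Running it prints two delayed ticks followed by a resume once ten consecutive idle ticks have accumulated, mirroring the timer callback's three outcomes.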
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 4ef67fca06d3..253e0bd38e25 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -309,6 +309,9 @@ struct mwifiex_bssdescriptor {
u16 wapi_offset;
u8 *beacon_buf;
u32 beacon_buf_size;
+ u8 sensed_11h;
+ u8 local_constraint;
+ u8 chan_sw_ie_present;
};
struct mwifiex_current_bss_params {
@@ -492,7 +495,6 @@ struct mwifiex_private {
struct semaphore async_sem;
u8 report_scan_result;
struct cfg80211_scan_request *scan_request;
- struct mwifiex_user_scan_cfg *user_scan_cfg;
u8 cfg_bssid[6];
struct wps wps;
u8 scan_block;
@@ -510,6 +512,9 @@ struct mwifiex_private {
u8 ap_11ac_enabled;
u32 mgmt_frame_mask;
struct mwifiex_roc_cfg roc_cfg;
+ bool scan_aborting;
+ u8 csa_chan;
+ unsigned long csa_expire_time;
};
enum mwifiex_ba_status {
@@ -596,6 +601,7 @@ struct mwifiex_if_ops {
int (*register_dev) (struct mwifiex_adapter *);
void (*unregister_dev) (struct mwifiex_adapter *);
int (*enable_int) (struct mwifiex_adapter *);
+ void (*disable_int) (struct mwifiex_adapter *);
int (*process_int_status) (struct mwifiex_adapter *);
int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *,
struct mwifiex_tx_param *);
@@ -730,6 +736,7 @@ struct mwifiex_adapter {
u16 max_mgmt_ie_index;
u8 scan_delay_cnt;
u8 empty_tx_q_cnt;
+ const struct firmware *cal_data;
/* 11AC */
u32 is_hw_11ac_capable;
@@ -1017,6 +1024,24 @@ static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
return (*(u32 *)skb->data == PKT_TYPE_MGMT);
}
+/* This function retrieves the channel closed for operation by a
+ * Channel Switch Announcement.
+ */
+static inline u8
+mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv)
+{
+ if (!priv->csa_chan)
+ return 0;
+
+ /* Clear csa channel, if DFS channel move time has passed */
+ if (jiffies > priv->csa_expire_time) {
+ priv->csa_chan = 0;
+ priv->csa_expire_time = 0;
+ }
+
+ return priv->csa_chan;
+}
+
int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
u32 func_init_shutdown);
int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -1115,6 +1140,12 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *data);
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
u8 *mwifiex_11d_code_2_region(u8 code);
+void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
+ struct mwifiex_sta_node *node);
+
+void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
+ struct mwifiex_bssdescriptor *bss_desc);
+int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
extern const struct ethtool_ops mwifiex_ethtool_ops;
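The csa_chan / csa_expire_time fields added to struct mwifiex_private above, together with the mwifiex_11h_get_csa_closed_channel() helper, keep a channel off-limits until the DFS channel-move time after a Channel Switch Announcement has elapsed. Below is a small stand-alone model of that expiry check, with jiffies replaced by a plain millisecond counter and an assumed value for the move time.

/* Toy model of the CSA closed-channel bookkeeping; time is in ms here
 * instead of jiffies, and DFS_CHAN_MOVE_TIME_MS is an assumed value. */
#include <stdio.h>
#include <stdint.h>

#define DFS_CHAN_MOVE_TIME_MS 10000

struct sta_priv { uint8_t csa_chan; uint64_t csa_expire_ms; };

static void csa_event(struct sta_priv *p, uint8_t chan, uint64_t now_ms)
{
        p->csa_chan = chan;
        p->csa_expire_ms = now_ms + DFS_CHAN_MOVE_TIME_MS;
}

/* Returns the channel still closed by CSA, or 0 once the timer expired. */
static uint8_t csa_closed_channel(struct sta_priv *p, uint64_t now_ms)
{
        if (!p->csa_chan)
                return 0;
        if (now_ms > p->csa_expire_ms) {
                p->csa_chan = 0;
                p->csa_expire_ms = 0;
        }
        return p->csa_chan;
}

int main(void)
{
        struct sta_priv priv = { 0, 0 };

        csa_event(&priv, 11, 1000);     /* CSA received on channel 11 */
        printf("t=2s   closed=%u\n", (unsigned)csa_closed_channel(&priv, 2000));
        printf("t=12s  closed=%u\n", (unsigned)csa_closed_channel(&priv, 12000));
        return 0;
}

The scan paths in scan.c call the real helper the same way: once the expiry time passes, the channel is no longer skipped when building the scan channel list.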
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 9cf5d8f07df8..c447d9bd1aa9 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -391,6 +391,12 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
return 0;
}
+ if (bss_desc->chan_sw_ie_present) {
+ dev_err(adapter->dev,
+ "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
+ return -1;
+ }
+
if (mwifiex_is_bss_wapi(priv, bss_desc)) {
dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
return 0;
@@ -569,6 +575,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
return -1;
}
+ /* Check csa channel expiry before preparing scan list */
+ mwifiex_11h_get_csa_closed_channel(priv);
+
chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
/* Set the temp channel struct pointer to the start of the desired
@@ -598,6 +607,11 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
while (tlv_idx < max_chan_per_scan &&
tmp_chan_list->chan_number && !done_early) {
+ if (tmp_chan_list->chan_number == priv->csa_chan) {
+ tmp_chan_list++;
+ continue;
+ }
+
dev_dbg(priv->adapter->dev,
"info: Scan: Chan(%3d), Radio(%d),"
" Mode(%d, %d), Dur(%d)\n",
@@ -1169,6 +1183,19 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->erp_flags = *(current_ptr + 2);
break;
+ case WLAN_EID_PWR_CONSTRAINT:
+ bss_entry->local_constraint = *(current_ptr + 2);
+ bss_entry->sensed_11h = true;
+ break;
+
+ case WLAN_EID_CHANNEL_SWITCH:
+ bss_entry->chan_sw_ie_present = true;
+ case WLAN_EID_PWR_CAPABILITY:
+ case WLAN_EID_TPC_REPORT:
+ case WLAN_EID_QUIET:
+ bss_entry->sensed_11h = true;
+ break;
+
case WLAN_EID_EXT_SUPP_RATES:
/*
* Only process extended supported rate
@@ -1575,6 +1602,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
goto check_next_scan;
}
+ /* Check csa channel expiry before parsing scan response */
+ mwifiex_11h_get_csa_closed_channel(priv);
+
bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
bytes_left);
@@ -1727,6 +1757,13 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
struct ieee80211_channel *chan;
u8 band;
+ /* Skip entry if on csa closed channel */
+ if (channel == priv->csa_chan) {
+ dev_dbg(adapter->dev,
+ "Dropping entry on csa closed channel\n");
+ continue;
+ }
+
band = BAND_G;
if (chan_band_tlv) {
chan_band =
@@ -1784,22 +1821,17 @@ check_next_scan:
if (priv->report_scan_result)
priv->report_scan_result = false;
- if (priv->user_scan_cfg) {
- if (priv->scan_request) {
- dev_dbg(priv->adapter->dev,
- "info: notifying scan done\n");
- cfg80211_scan_done(priv->scan_request, 0);
- priv->scan_request = NULL;
- } else {
- dev_dbg(priv->adapter->dev,
- "info: scan already aborted\n");
- }
-
- kfree(priv->user_scan_cfg);
- priv->user_scan_cfg = NULL;
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: notifying scan done\n");
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ } else {
+ priv->scan_aborting = false;
+ dev_dbg(adapter->dev, "info: scan already aborted\n");
}
} else {
- if (priv->user_scan_cfg && !priv->scan_request) {
+ if ((priv->scan_aborting && !priv->scan_request) ||
+ priv->scan_block) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 363ba31b58bf..09185c963248 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -51,6 +51,7 @@ static struct mwifiex_if_ops sdio_ops;
static struct semaphore add_remove_card_sem;
static int mwifiex_sdio_resume(struct device *dev);
+static void mwifiex_sdio_interrupt(struct sdio_func *func);
/*
* SDIO probe.
@@ -77,6 +78,17 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
+ if (id->driver_data) {
+ struct mwifiex_sdio_device *data = (void *)id->driver_data;
+
+ card->firmware = data->firmware;
+ card->reg = data->reg;
+ card->max_ports = data->max_ports;
+ card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
+ card->supports_sdio_new_mode = data->supports_sdio_new_mode;
+ card->has_control_mask = data->has_control_mask;
+ }
+
sdio_claim_host(func);
ret = sdio_enable_func(func);
sdio_release_host(func);
@@ -251,12 +263,19 @@ static int mwifiex_sdio_resume(struct device *dev)
#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
/* Device ID for SD8797 */
#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129)
+/* Device ID for SD8897 */
+#define SDIO_DEVICE_ID_MARVELL_8897 (0x912d)
/* WLAN IDs */
static const struct sdio_device_id mwifiex_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786),
+ .driver_data = (unsigned long) &mwifiex_sdio_sd8786},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787),
+ .driver_data = (unsigned long) &mwifiex_sdio_sd8787},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797),
+ .driver_data = (unsigned long) &mwifiex_sdio_sd8797},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897),
+ .driver_data = (unsigned long) &mwifiex_sdio_sd8897},
{},
};
@@ -278,17 +297,26 @@ static struct sdio_driver mwifiex_sdio = {
}
};
+/* Write data into SDIO card register. Caller claims SDIO device. */
+static int
+mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
+{
+ int ret = -1;
+ sdio_writeb(func, data, reg, &ret);
+ return ret;
+}
+
/*
* This function writes data into SDIO card register.
*/
static int
-mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data)
+mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
{
struct sdio_mmc_card *card = adapter->card;
- int ret = -1;
+ int ret;
sdio_claim_host(card->func);
- sdio_writeb(card->func, (u8) data, reg, &ret);
+ ret = mwifiex_write_reg_locked(card->func, reg, data);
sdio_release_host(card->func);
return ret;
@@ -298,7 +326,7 @@ mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data)
* This function reads data from SDIO card register.
*/
static int
-mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u32 *data)
+mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data)
{
struct sdio_mmc_card *card = adapter->card;
int ret = -1;
@@ -400,7 +428,40 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
}
/*
- * This function initializes the IO ports.
+ * This function is used to initialize IO ports for the
+ * chipsets supporting the SDIO new mode, e.g. SD8897.
+ */
+static int mwifiex_init_sdio_new_mode(struct mwifiex_adapter *adapter)
+{
+ u8 reg;
+
+ adapter->ioport = MEM_PORT;
+
+ /* enable sdio new mode */
+ if (mwifiex_read_reg(adapter, CARD_CONFIG_2_1_REG, &reg))
+ return -1;
+ if (mwifiex_write_reg(adapter, CARD_CONFIG_2_1_REG,
+ reg | CMD53_NEW_MODE))
+ return -1;
+
+ /* Configure cmd port and enable reading rx length from the register */
+ if (mwifiex_read_reg(adapter, CMD_CONFIG_0, &reg))
+ return -1;
+ if (mwifiex_write_reg(adapter, CMD_CONFIG_0, reg | CMD_PORT_RD_LEN_EN))
+ return -1;
+
+ /* Enable Dnld/Upld ready auto reset for cmd port after cmd53 is
+ * completed
+ */
+ if (mwifiex_read_reg(adapter, CMD_CONFIG_1, &reg))
+ return -1;
+ if (mwifiex_write_reg(adapter, CMD_CONFIG_1, reg | CMD_PORT_AUTO_EN))
+ return -1;
+
+ return 0;
+}
+
+/* This function initializes the IO ports.
*
* The following operations are performed -
* - Read the IO ports (0, 1 and 2)
@@ -409,10 +470,17 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
*/
static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
{
- u32 reg;
+ u8 reg;
+ struct sdio_mmc_card *card = adapter->card;
adapter->ioport = 0;
+ if (card->supports_sdio_new_mode) {
+ if (mwifiex_init_sdio_new_mode(adapter))
+ return -1;
+ goto cont;
+ }
+
/* Read the IO port */
if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, &reg))
adapter->ioport |= (reg & 0xff);
@@ -428,19 +496,19 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
adapter->ioport |= ((reg & 0xff) << 16);
else
return -1;
-
+cont:
pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
/* Set Host interrupt reset to read to clear */
if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, &reg))
mwifiex_write_reg(adapter, HOST_INT_RSR_REG,
- reg | SDIO_INT_MASK);
+ reg | card->reg->sdio_int_mask);
else
return -1;
/* Dnld/Upld ready set to auto reset */
- if (!mwifiex_read_reg(adapter, CARD_MISC_CFG_REG, &reg))
- mwifiex_write_reg(adapter, CARD_MISC_CFG_REG,
+ if (!mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, &reg))
+ mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg,
reg | AUTO_RE_ENABLE_INT);
else
return -1;
@@ -486,34 +554,42 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
{
struct sdio_mmc_card *card = adapter->card;
- u16 rd_bitmap = card->mp_rd_bitmap;
+ const struct mwifiex_sdio_card_reg *reg = card->reg;
+ u32 rd_bitmap = card->mp_rd_bitmap;
- dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%04x\n", rd_bitmap);
+ dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
- if (!(rd_bitmap & (CTRL_PORT_MASK | DATA_PORT_MASK)))
- return -1;
+ if (card->supports_sdio_new_mode) {
+ if (!(rd_bitmap & reg->data_port_mask))
+ return -1;
+ } else {
+ if (!(rd_bitmap & (CTRL_PORT_MASK | reg->data_port_mask)))
+ return -1;
+ }
- if (card->mp_rd_bitmap & CTRL_PORT_MASK) {
- card->mp_rd_bitmap &= (u16) (~CTRL_PORT_MASK);
+ if ((card->has_control_mask) &&
+ (card->mp_rd_bitmap & CTRL_PORT_MASK)) {
+ card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK);
*port = CTRL_PORT;
- dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%04x\n",
+ dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n",
*port, card->mp_rd_bitmap);
- } else {
- if (card->mp_rd_bitmap & (1 << card->curr_rd_port)) {
- card->mp_rd_bitmap &= (u16)
- (~(1 << card->curr_rd_port));
- *port = card->curr_rd_port;
+ return 0;
+ }
- if (++card->curr_rd_port == MAX_PORT)
- card->curr_rd_port = 1;
- } else {
- return -1;
- }
+ if (!(card->mp_rd_bitmap & (1 << card->curr_rd_port)))
+ return -1;
+
+ /* We are now handling the SDIO data ports */
+ card->mp_rd_bitmap &= (u32)(~(1 << card->curr_rd_port));
+ *port = card->curr_rd_port;
+
+ if (++card->curr_rd_port == card->max_ports)
+ card->curr_rd_port = reg->start_rd_port;
+
+ dev_dbg(adapter->dev,
+ "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
+ *port, rd_bitmap, card->mp_rd_bitmap);
- dev_dbg(adapter->dev,
- "data: port=%d mp_rd_bitmap=0x%04x -> 0x%04x\n",
- *port, rd_bitmap, card->mp_rd_bitmap);
- }
return 0;
}
@@ -524,35 +600,45 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
* increased (provided it does not reach the maximum limit, in which
* case it is reset to 1)
*/
-static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port)
+static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
{
struct sdio_mmc_card *card = adapter->card;
- u16 wr_bitmap = card->mp_wr_bitmap;
+ const struct mwifiex_sdio_card_reg *reg = card->reg;
+ u32 wr_bitmap = card->mp_wr_bitmap;
- dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%04x\n", wr_bitmap);
+ dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
- if (!(wr_bitmap & card->mp_data_port_mask))
+ if (card->supports_sdio_new_mode &&
+ !(wr_bitmap & reg->data_port_mask)) {
+ adapter->data_sent = true;
+ return -EBUSY;
+ } else if (!card->supports_sdio_new_mode &&
+ !(wr_bitmap & card->mp_data_port_mask)) {
return -1;
+ }
if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) {
- card->mp_wr_bitmap &= (u16) (~(1 << card->curr_wr_port));
+ card->mp_wr_bitmap &= (u32) (~(1 << card->curr_wr_port));
*port = card->curr_wr_port;
- if (++card->curr_wr_port == card->mp_end_port)
- card->curr_wr_port = 1;
+ if (((card->supports_sdio_new_mode) &&
+ (++card->curr_wr_port == card->max_ports)) ||
+ ((!card->supports_sdio_new_mode) &&
+ (++card->curr_wr_port == card->mp_end_port)))
+ card->curr_wr_port = reg->start_wr_port;
} else {
adapter->data_sent = true;
return -EBUSY;
}
- if (*port == CTRL_PORT) {
- dev_err(adapter->dev, "invalid data port=%d cur port=%d"
- " mp_wr_bitmap=0x%04x -> 0x%04x\n",
+ if ((card->has_control_mask) && (*port == CTRL_PORT)) {
+ dev_err(adapter->dev,
+ "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
*port, card->curr_wr_port, wr_bitmap,
card->mp_wr_bitmap);
return -1;
}
- dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%04x -> 0x%04x\n",
+ dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
*port, wr_bitmap, card->mp_wr_bitmap);
return 0;
@@ -564,11 +650,12 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port)
static int
mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
{
+ struct sdio_mmc_card *card = adapter->card;
u32 tries;
- u32 cs;
+ u8 cs;
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
- if (mwifiex_read_reg(adapter, CARD_STATUS_REG, &cs))
+ if (mwifiex_read_reg(adapter, card->reg->poll_reg, &cs))
break;
else if ((cs & bits) == bits)
return 0;
@@ -587,12 +674,14 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
static int
mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
{
- u32 fws0, fws1;
+ struct sdio_mmc_card *card = adapter->card;
+ const struct mwifiex_sdio_card_reg *reg = card->reg;
+ u8 fws0, fws1;
- if (mwifiex_read_reg(adapter, CARD_FW_STATUS0_REG, &fws0))
+ if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0))
return -1;
- if (mwifiex_read_reg(adapter, CARD_FW_STATUS1_REG, &fws1))
+ if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1))
return -1;
*dat = (u16) ((fws1 << 8) | fws0);
@@ -606,23 +695,15 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
* The host interrupt mask is read, the disable bit is reset and
* written back to the card host interrupt mask register.
*/
-static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
{
- u32 host_int_mask;
-
- /* Read back the host_int_mask register */
- if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask))
- return -1;
-
- /* Update with the mask and write back to the register */
- host_int_mask &= ~HOST_INT_DISABLE;
-
- if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) {
- dev_err(adapter->dev, "disable host interrupt failed\n");
- return -1;
- }
+ struct sdio_mmc_card *card = adapter->card;
+ struct sdio_func *func = card->func;
- return 0;
+ sdio_claim_host(func);
+ mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, 0);
+ sdio_release_irq(func);
+ sdio_release_host(func);
}
/*
@@ -633,12 +714,30 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
*/
static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
{
+ struct sdio_mmc_card *card = adapter->card;
+ struct sdio_func *func = card->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ /* Request the SDIO IRQ */
+ ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
+ if (ret) {
+ dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
+ goto out;
+ }
+
/* Simply write the mask to the register */
- if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, HOST_INT_ENABLE)) {
+ ret = mwifiex_write_reg_locked(func, HOST_INT_MASK_REG,
+ card->reg->host_int_enable);
+ if (ret) {
dev_err(adapter->dev, "enable host interrupt failed\n");
- return -1;
+ sdio_release_irq(func);
}
- return 0;
+
+out:
+ sdio_release_host(func);
+ return ret;
}
/*
@@ -686,11 +785,13 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct mwifiex_fw_image *fw)
{
+ struct sdio_mmc_card *card = adapter->card;
+ const struct mwifiex_sdio_card_reg *reg = card->reg;
int ret;
u8 *firmware = fw->fw_buf;
u32 firmware_len = fw->fw_len;
u32 offset = 0;
- u32 base0, base1;
+ u8 base0, base1;
u8 *fwbuf;
u16 len = 0;
u32 txlen, tx_blocks = 0, tries;
@@ -727,7 +828,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
break;
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
- ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_0,
+ ret = mwifiex_read_reg(adapter, reg->base_0_reg,
&base0);
if (ret) {
dev_err(adapter->dev,
@@ -736,7 +837,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
base0, base0);
goto done;
}
- ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_1,
+ ret = mwifiex_read_reg(adapter, reg->base_1_reg,
&base1);
if (ret) {
dev_err(adapter->dev,
@@ -828,10 +929,11 @@ done:
static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
u32 poll_num)
{
+ struct sdio_mmc_card *card = adapter->card;
int ret = 0;
u16 firmware_stat;
u32 tries;
- u32 winner_status;
+ u8 winner_status;
/* Wait for firmware initialization event */
for (tries = 0; tries < poll_num; tries++) {
@@ -849,7 +951,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
if (ret) {
if (mwifiex_read_reg
- (adapter, CARD_FW_STATUS0_REG, &winner_status))
+ (adapter, card->reg->status_reg_0, &winner_status))
winner_status = 0;
if (winner_status)
@@ -866,12 +968,12 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
- u32 sdio_ireg;
+ u8 sdio_ireg;
unsigned long flags;
- if (mwifiex_read_data_sync(adapter, card->mp_regs, MAX_MP_REGS,
- REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK,
- 0)) {
+ if (mwifiex_read_data_sync(adapter, card->mp_regs,
+ card->reg->max_mp_regs,
+ REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
dev_err(adapter->dev, "read mp_regs failed\n");
return;
}
@@ -880,6 +982,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
if (sdio_ireg) {
/*
* DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
+ * For SDIO new mode CMD port interrupts
+ * DN_LD_CMD_PORT_HOST_INT_STATUS and/or
+ * UP_LD_CMD_PORT_HOST_INT_STATUS
* Clear the interrupt status register
*/
dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
@@ -909,9 +1014,6 @@ mwifiex_sdio_interrupt(struct sdio_func *func)
}
adapter = card->adapter;
- if (adapter->surprise_removed)
- return;
-
if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
adapter->ps_state = PS_STATE_AWAKE;
@@ -1003,11 +1105,11 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
s32 f_aggr_cur = 0;
struct sk_buff *skb_deaggr;
u32 pind;
- u32 pkt_len, pkt_type = 0;
+ u32 pkt_len, pkt_type, mport;
u8 *curr_ptr;
u32 rx_len = skb->len;
- if (port == CTRL_PORT) {
+ if ((card->has_control_mask) && (port == CTRL_PORT)) {
/* Read the command Resp without aggr */
dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
"response\n", __func__);
@@ -1024,7 +1126,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
goto rx_curr_single;
}
- if (card->mp_rd_bitmap & (~((u16) CTRL_PORT_MASK))) {
+ if ((!card->has_control_mask && (card->mp_rd_bitmap &
+ card->reg->data_port_mask)) ||
+ (card->has_control_mask && (card->mp_rd_bitmap &
+ (~((u32) CTRL_PORT_MASK))))) {
/* Some more data RX pending */
dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
@@ -1060,10 +1165,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if (f_aggr_cur) {
dev_dbg(adapter->dev, "info: current packet aggregation\n");
/* Curr pkt can be aggregated */
- MP_RX_AGGR_SETUP(card, skb, port);
+ mp_rx_aggr_setup(card, skb, port);
if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
- MP_RX_AGGR_PORT_LIMIT_REACHED(card)) {
+ mp_rx_aggr_port_limit_reached(card)) {
dev_dbg(adapter->dev, "info: %s: aggregated packet "
"limit reached\n", __func__);
/* No more pkts allowed in Aggr buf, rx it */
@@ -1076,11 +1181,28 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
card->mpa_rx.pkt_cnt);
+ if (card->supports_sdio_new_mode) {
+ int i;
+ u32 port_count;
+
+ for (i = 0, port_count = 0; i < card->max_ports; i++)
+ if (card->mpa_rx.ports & BIT(i))
+ port_count++;
+
+ /* Reading data from "start_port + 0" to "start_port +
+ * port_count -1", so decrease the count by 1
+ */
+ port_count--;
+ mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
+ (port_count << 8)) + card->mpa_rx.start_port;
+ } else {
+ mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
+ (card->mpa_rx.ports << 4)) +
+ card->mpa_rx.start_port;
+ }
+
if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
- card->mpa_rx.buf_len,
- (adapter->ioport | 0x1000 |
- (card->mpa_rx.ports << 4)) +
- card->mpa_rx.start_port, 1))
+ card->mpa_rx.buf_len, mport, 1))
goto error;
curr_ptr = card->mpa_rx.buf;
@@ -1167,6 +1289,7 @@ error:
static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
+ const struct mwifiex_sdio_card_reg *reg = card->reg;
int ret = 0;
u8 sdio_ireg;
struct sk_buff *skb;
@@ -1175,6 +1298,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
u32 rx_blocks;
u16 rx_len;
unsigned long flags;
+ u32 bitmap;
+ u8 cr;
spin_lock_irqsave(&adapter->int_lock, flags);
sdio_ireg = adapter->int_status;
@@ -1184,10 +1309,60 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (!sdio_ireg)
return ret;
+ /* Following interrupt is only for SDIO new mode */
+ if (sdio_ireg & DN_LD_CMD_PORT_HOST_INT_STATUS && adapter->cmd_sent)
+ adapter->cmd_sent = false;
+
+ /* Following interrupt is only for SDIO new mode */
+ if (sdio_ireg & UP_LD_CMD_PORT_HOST_INT_STATUS) {
+ u32 pkt_type;
+
+ /* read the len of control packet */
+ rx_len = card->mp_regs[CMD_RD_LEN_1] << 8;
+ rx_len |= (u16) card->mp_regs[CMD_RD_LEN_0];
+ rx_blocks = DIV_ROUND_UP(rx_len, MWIFIEX_SDIO_BLOCK_SIZE);
+ if (rx_len <= INTF_HEADER_LEN ||
+ (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
+ MWIFIEX_RX_DATA_BUF_SIZE)
+ return -1;
+ rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
+
+ skb = dev_alloc_skb(rx_len);
+ if (!skb)
+ return -1;
+
+ skb_put(skb, rx_len);
+
+ if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data,
+ skb->len, adapter->ioport |
+ CMD_PORT_SLCT)) {
+ dev_err(adapter->dev,
+ "%s: failed to card_to_host", __func__);
+ dev_kfree_skb_any(skb);
+ goto term_cmd;
+ }
+
+ if ((pkt_type != MWIFIEX_TYPE_CMD) &&
+ (pkt_type != MWIFIEX_TYPE_EVENT))
+ dev_err(adapter->dev,
+ "%s:Received wrong packet on cmd port",
+ __func__);
+
+ mwifiex_decode_rx_packet(adapter, skb, pkt_type);
+ }
+
if (sdio_ireg & DN_LD_HOST_INT_STATUS) {
- card->mp_wr_bitmap = ((u16) card->mp_regs[WR_BITMAP_U]) << 8;
- card->mp_wr_bitmap |= (u16) card->mp_regs[WR_BITMAP_L];
- dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%04x\n",
+ bitmap = (u32) card->mp_regs[reg->wr_bitmap_l];
+ bitmap |= ((u32) card->mp_regs[reg->wr_bitmap_u]) << 8;
+ if (card->supports_sdio_new_mode) {
+ bitmap |=
+ ((u32) card->mp_regs[reg->wr_bitmap_1l]) << 16;
+ bitmap |=
+ ((u32) card->mp_regs[reg->wr_bitmap_1u]) << 24;
+ }
+ card->mp_wr_bitmap = bitmap;
+
+ dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n",
card->mp_wr_bitmap);
if (adapter->data_sent &&
(card->mp_wr_bitmap & card->mp_data_port_mask)) {
@@ -1200,11 +1375,11 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
/* As firmware will not generate download ready interrupt if the port
updated is command port only, cmd_sent should be done for any SDIO
interrupt. */
- if (adapter->cmd_sent) {
+ if (card->has_control_mask && adapter->cmd_sent) {
/* Check if firmware has attach buffer at command port and
update just that in wr_bit_map. */
card->mp_wr_bitmap |=
- (u16) card->mp_regs[WR_BITMAP_L] & CTRL_PORT_MASK;
+ (u32) card->mp_regs[reg->wr_bitmap_l] & CTRL_PORT_MASK;
if (card->mp_wr_bitmap & CTRL_PORT_MASK)
adapter->cmd_sent = false;
}
@@ -1212,9 +1387,16 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
adapter->cmd_sent, adapter->data_sent);
if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
- card->mp_rd_bitmap = ((u16) card->mp_regs[RD_BITMAP_U]) << 8;
- card->mp_rd_bitmap |= (u16) card->mp_regs[RD_BITMAP_L];
- dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%04x\n",
+ bitmap = (u32) card->mp_regs[reg->rd_bitmap_l];
+ bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8;
+ if (card->supports_sdio_new_mode) {
+ bitmap |=
+ ((u32) card->mp_regs[reg->rd_bitmap_1l]) << 16;
+ bitmap |=
+ ((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24;
+ }
+ card->mp_rd_bitmap = bitmap;
+ dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n",
card->mp_rd_bitmap);
while (true) {
@@ -1224,8 +1406,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
"info: no more rd_port available\n");
break;
}
- len_reg_l = RD_LEN_P0_L + (port << 1);
- len_reg_u = RD_LEN_P0_U + (port << 1);
+ len_reg_l = reg->rd_len_p0_l + (port << 1);
+ len_reg_u = reg->rd_len_p0_u + (port << 1);
rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
rx_len |= (u16) card->mp_regs[len_reg_l];
dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
@@ -1257,37 +1439,33 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
port)) {
- u32 cr = 0;
-
dev_err(adapter->dev, "card_to_host_mpa failed:"
" int status=%#x\n", sdio_ireg);
- if (mwifiex_read_reg(adapter,
- CONFIGURATION_REG, &cr))
- dev_err(adapter->dev,
- "read CFG reg failed\n");
-
- dev_dbg(adapter->dev,
- "info: CFG reg val = %d\n", cr);
- if (mwifiex_write_reg(adapter,
- CONFIGURATION_REG,
- (cr | 0x04)))
- dev_err(adapter->dev,
- "write CFG reg failed\n");
-
- dev_dbg(adapter->dev, "info: write success\n");
- if (mwifiex_read_reg(adapter,
- CONFIGURATION_REG, &cr))
- dev_err(adapter->dev,
- "read CFG reg failed\n");
-
- dev_dbg(adapter->dev,
- "info: CFG reg val =%x\n", cr);
- return -1;
+ goto term_cmd;
}
}
}
return 0;
+
+term_cmd:
+ /* terminate cmd */
+ if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
+ dev_err(adapter->dev, "read CFG reg failed\n");
+ else
+ dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr);
+
+ if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04)))
+ dev_err(adapter->dev, "write CFG reg failed\n");
+ else
+ dev_dbg(adapter->dev, "info: write success\n");
+
+ if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
+ dev_err(adapter->dev, "read CFG reg failed\n");
+ else
+ dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr);
+
+ return -1;
}
/*
@@ -1305,7 +1483,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
* and return.
*/
static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
- u8 *payload, u32 pkt_len, u8 port,
+ u8 *payload, u32 pkt_len, u32 port,
u32 next_pkt_len)
{
struct sdio_mmc_card *card = adapter->card;
@@ -1314,8 +1492,11 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
s32 f_send_cur_buf = 0;
s32 f_precopy_cur_buf = 0;
s32 f_postcopy_cur_buf = 0;
+ u32 mport;
- if ((!card->mpa_tx.enabled) || (port == CTRL_PORT)) {
+ if (!card->mpa_tx.enabled ||
+ (card->has_control_mask && (port == CTRL_PORT)) ||
+ (card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) {
dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
__func__);
@@ -1329,7 +1510,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
__func__);
if (MP_TX_AGGR_IN_PROGRESS(card)) {
- if (!MP_TX_AGGR_PORT_LIMIT_REACHED(card) &&
+ if (!mp_tx_aggr_port_limit_reached(card) &&
MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
f_precopy_cur_buf = 1;
@@ -1342,7 +1523,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
/* No room in Aggr buf, send it */
f_send_aggr_buf = 1;
- if (MP_TX_AGGR_PORT_LIMIT_REACHED(card) ||
+ if (mp_tx_aggr_port_limit_reached(card) ||
!(card->mp_wr_bitmap &
(1 << card->curr_wr_port)))
f_send_cur_buf = 1;
@@ -1381,7 +1562,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
- MP_TX_AGGR_PORT_LIMIT_REACHED(card))
+ mp_tx_aggr_port_limit_reached(card))
/* No more pkts allowed in Aggr buf, send it */
f_send_aggr_buf = 1;
}
@@ -1390,11 +1571,28 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
__func__,
card->mpa_tx.start_port, card->mpa_tx.ports);
+ if (card->supports_sdio_new_mode) {
+ u32 port_count;
+ int i;
+
+ for (i = 0, port_count = 0; i < card->max_ports; i++)
+ if (card->mpa_tx.ports & BIT(i))
+ port_count++;
+
+ /* Writing data from "start_port + 0" to "start_port +
+ * port_count -1", so decrease the count by 1
+ */
+ port_count--;
+ mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
+ (port_count << 8)) + card->mpa_tx.start_port;
+ } else {
+ mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
+ (card->mpa_tx.ports << 4)) +
+ card->mpa_tx.start_port;
+ }
+
ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
- card->mpa_tx.buf_len,
- (adapter->ioport | 0x1000 |
- (card->mpa_tx.ports << 4)) +
- card->mpa_tx.start_port);
+ card->mpa_tx.buf_len, mport);
MP_TX_AGGR_BUF_RESET(card);
}
@@ -1434,15 +1632,15 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
int ret;
u32 buf_block_len;
u32 blk_size;
- u8 port = CTRL_PORT;
+ u32 port = CTRL_PORT;
u8 *payload = (u8 *)skb->data;
u32 pkt_len = skb->len;
/* Allocate buffer and copy payload */
blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
buf_block_len = (pkt_len + blk_size - 1) / blk_size;
- *(u16 *) &payload[0] = (u16) pkt_len;
- *(u16 *) &payload[2] = type;
+ *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
+ *(__le16 *)&payload[2] = cpu_to_le16(type);
/*
* This is SDIO specific header
@@ -1465,6 +1663,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
pkt_len > MWIFIEX_UPLD_SIZE)
dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
__func__, payload, pkt_len);
+
+ if (card->supports_sdio_new_mode)
+ port = CMD_PORT_SLCT;
}
/* Transfer data to card */
@@ -1541,9 +1742,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
struct sdio_mmc_card *card = adapter->card;
if (adapter->card) {
- /* Release the SDIO IRQ */
sdio_claim_host(card->func);
- sdio_release_irq(card->func);
sdio_disable_func(card->func);
sdio_release_host(card->func);
sdio_set_drvdata(card->func, NULL);
@@ -1557,7 +1756,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
*/
static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
{
- int ret = 0;
+ int ret;
struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
@@ -1566,49 +1765,21 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
sdio_claim_host(func);
- /* Request the SDIO IRQ */
- ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
- if (ret) {
- pr_err("claim irq failed: ret=%d\n", ret);
- goto disable_func;
- }
-
/* Set block size */
ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
+ sdio_release_host(func);
if (ret) {
pr_err("cannot set SDIO block size\n");
- ret = -1;
- goto release_irq;
+ return ret;
}
- sdio_release_host(func);
sdio_set_drvdata(func, card);
adapter->dev = &func->dev;
- switch (func->device) {
- case SDIO_DEVICE_ID_MARVELL_8786:
- strcpy(adapter->fw_name, SD8786_DEFAULT_FW_NAME);
- break;
- case SDIO_DEVICE_ID_MARVELL_8797:
- strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
- break;
- case SDIO_DEVICE_ID_MARVELL_8787:
- default:
- strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
- break;
- }
+ strcpy(adapter->fw_name, card->firmware);
return 0;
-
-release_irq:
- sdio_release_irq(func);
-disable_func:
- sdio_disable_func(func);
- sdio_release_host(func);
- adapter->card = NULL;
-
- return -1;
}
/*
@@ -1626,8 +1797,9 @@ disable_func:
static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
+ const struct mwifiex_sdio_card_reg *reg = card->reg;
int ret;
- u32 sdio_ireg;
+ u8 sdio_ireg;
/*
* Read the HOST_INT_STATUS_REG for ACK the first interrupt got
@@ -1636,39 +1808,41 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
*/
mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg);
- /* Disable host interrupt mask register for SDIO */
- mwifiex_sdio_disable_host_int(adapter);
-
/* Get SDIO ioport */
mwifiex_init_sdio_ioport(adapter);
/* Initialize SDIO variables in card */
card->mp_rd_bitmap = 0;
card->mp_wr_bitmap = 0;
- card->curr_rd_port = 1;
- card->curr_wr_port = 1;
+ card->curr_rd_port = reg->start_rd_port;
+ card->curr_wr_port = reg->start_wr_port;
- card->mp_data_port_mask = DATA_PORT_MASK;
+ card->mp_data_port_mask = reg->data_port_mask;
card->mpa_tx.buf_len = 0;
card->mpa_tx.pkt_cnt = 0;
card->mpa_tx.start_port = 0;
card->mpa_tx.enabled = 1;
- card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
+ card->mpa_tx.pkt_aggr_limit = card->mp_agg_pkt_limit;
card->mpa_rx.buf_len = 0;
card->mpa_rx.pkt_cnt = 0;
card->mpa_rx.start_port = 0;
card->mpa_rx.enabled = 1;
- card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
+ card->mpa_rx.pkt_aggr_limit = card->mp_agg_pkt_limit;
/* Allocate buffers for SDIO MP-A */
- card->mp_regs = kzalloc(MAX_MP_REGS, GFP_KERNEL);
+ card->mp_regs = kzalloc(reg->max_mp_regs, GFP_KERNEL);
if (!card->mp_regs)
return -ENOMEM;
+ /* Allocate skb pointer buffers */
+ card->mpa_rx.skb_arr = kzalloc((sizeof(void *)) *
+ card->mp_agg_pkt_limit, GFP_KERNEL);
+ card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
+ card->mp_agg_pkt_limit, GFP_KERNEL);
ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
@@ -1705,6 +1879,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
struct sdio_mmc_card *card = adapter->card;
kfree(card->mp_regs);
+ kfree(card->mpa_rx.skb_arr);
+ kfree(card->mpa_rx.len_arr);
kfree(card->mpa_tx.buf);
kfree(card->mpa_rx.buf);
}
@@ -1716,16 +1892,20 @@ static void
mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
{
struct sdio_mmc_card *card = adapter->card;
+ const struct mwifiex_sdio_card_reg *reg = card->reg;
int i;
card->mp_end_port = port;
- card->mp_data_port_mask = DATA_PORT_MASK;
+ card->mp_data_port_mask = reg->data_port_mask;
- for (i = 1; i <= MAX_PORT - card->mp_end_port; i++)
- card->mp_data_port_mask &= ~(1 << (MAX_PORT - i));
+ if (reg->start_wr_port) {
+ for (i = 1; i <= card->max_ports - card->mp_end_port; i++)
+ card->mp_data_port_mask &=
+ ~(1 << (card->max_ports - i));
+ }
- card->curr_wr_port = 1;
+ card->curr_wr_port = reg->start_wr_port;
dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
port, card->mp_data_port_mask);
@@ -1769,6 +1949,7 @@ static struct mwifiex_if_ops sdio_ops = {
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
.enable_int = mwifiex_sdio_enable_host_int,
+ .disable_int = mwifiex_sdio_disable_host_int,
.process_int_status = mwifiex_process_int_status,
.host_to_card = mwifiex_sdio_host_to_card,
.wakeup = mwifiex_pm_wakeup_card,
@@ -1831,3 +2012,4 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
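A recurring change in the sdio.c diff above is that the read/write port bitmaps widen from 16 to 32 bits for the SD8897: mwifiex_process_int_status() now assembles them from four single-byte registers (wr_bitmap_l/u/1l/1u), and the port-selection helpers wrap at card->max_ports instead of a fixed MAX_PORT. The sketch below reproduces just that pattern in user space; the register offsets and sample values are invented for illustration.

/* Stand-alone illustration; mp_regs contents and offsets are made up. */
#include <stdio.h>
#include <stdint.h>

struct bitmap_regs { uint8_t l, u, b1l, b1u; };  /* offsets into mp_regs */

static uint32_t assemble_bitmap(const uint8_t *mp_regs,
                                const struct bitmap_regs *r, int new_mode)
{
        uint32_t bm = mp_regs[r->l] | ((uint32_t)mp_regs[r->u] << 8);

        if (new_mode)                           /* SD8897: two extra bytes */
                bm |= ((uint32_t)mp_regs[r->b1l] << 16) |
                      ((uint32_t)mp_regs[r->b1u] << 24);
        return bm;
}

/* Return the next ready port starting at *curr, wrapping at max_ports,
 * and clear it from the bitmap; loosely mirrors mwifiex_get_wr_port_data(). */
static int next_port(uint32_t *bitmap, unsigned *curr, unsigned max_ports)
{
        if (!(*bitmap & (1U << *curr)))
                return -1;                      /* firmware buffer not ready */
        *bitmap &= ~(1U << *curr);
        unsigned port = (*curr)++;
        if (*curr == max_ports)
                *curr = 0;                      /* start_wr_port is 0 on SD8897 */
        return (int)port;
}

int main(void)
{
        uint8_t mp_regs[16] = { [8] = 0x0e, [9] = 0x00, [10] = 0x01, [11] = 0x80 };
        struct bitmap_regs wr = { .l = 8, .u = 9, .b1l = 10, .b1u = 11 };
        uint32_t bm = assemble_bitmap(mp_regs, &wr, 1);
        unsigned curr = 1;

        printf("wr_bitmap=0x%08x\n", (unsigned)bm);     /* 0x8001000e */
        for (int p; (p = next_port(&bm, &curr, 32)) >= 0; )
                printf("tx on port %d\n", p);
        return 0;
}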
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 8cc5468654b4..532ae0ac4dfb 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -32,30 +32,37 @@
#define SD8786_DEFAULT_FW_NAME "mrvl/sd8786_uapsta.bin"
#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
+#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
#define REG_PORT 0
-#define RD_BITMAP_L 0x04
-#define RD_BITMAP_U 0x05
-#define WR_BITMAP_L 0x06
-#define WR_BITMAP_U 0x07
-#define RD_LEN_P0_L 0x08
-#define RD_LEN_P0_U 0x09
#define MWIFIEX_SDIO_IO_PORT_MASK 0xfffff
#define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000
+#define SDIO_MPA_ADDR_BASE 0x1000
#define CTRL_PORT 0
#define CTRL_PORT_MASK 0x0001
-#define DATA_PORT_MASK 0xfffe
-#define MAX_MP_REGS 64
-#define MAX_PORT 16
-
-#define SDIO_MP_AGGR_DEF_PKT_LIMIT 8
+#define CMD_PORT_UPLD_INT_MASK (0x1U<<6)
+#define CMD_PORT_DNLD_INT_MASK (0x1U<<7)
+#define HOST_TERM_CMD53 (0x1U << 2)
+#define REG_PORT 0
+#define MEM_PORT 0x10000
+#define CMD_RD_LEN_0 0xB4
+#define CMD_RD_LEN_1 0xB5
+#define CARD_CONFIG_2_1_REG 0xCD
+#define CMD53_NEW_MODE (0x1U << 0)
+#define CMD_CONFIG_0 0xB8
+#define CMD_PORT_RD_LEN_EN (0x1U << 2)
+#define CMD_CONFIG_1 0xB9
+#define CMD_PORT_AUTO_EN (0x1U << 0)
+#define CMD_PORT_SLCT 0x8000
+#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U)
+#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U)
#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */
@@ -75,14 +82,8 @@
/* Host Control Registers : Configuration */
#define CONFIGURATION_REG 0x00
-/* Host Control Registers : Host without Command 53 finish host*/
-#define HOST_TO_CARD_EVENT (0x1U << 3)
-/* Host Control Registers : Host without Command 53 finish host */
-#define HOST_WO_CMD53_FINISH_HOST (0x1U << 2)
/* Host Control Registers : Host power up */
#define HOST_POWER_UP (0x1U << 1)
-/* Host Control Registers : Host power down */
-#define HOST_POWER_DOWN (0x1U << 0)
/* Host Control Registers : Host interrupt mask */
#define HOST_INT_MASK_REG 0x02
@@ -90,10 +91,6 @@
#define UP_LD_HOST_INT_MASK (0x1U)
/* Host Control Registers : Download host interrupt mask */
#define DN_LD_HOST_INT_MASK (0x2U)
-/* Enable Host interrupt mask */
-#define HOST_INT_ENABLE (UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK)
-/* Disable Host interrupt mask */
-#define HOST_INT_DISABLE 0xff
/* Host Control Registers : Host interrupt status */
#define HOST_INTSTATUS_REG 0x03
@@ -104,74 +101,15 @@
/* Host Control Registers : Host interrupt RSR */
#define HOST_INT_RSR_REG 0x01
-/* Host Control Registers : Upload host interrupt RSR */
-#define UP_LD_HOST_INT_RSR (0x1U)
-#define SDIO_INT_MASK 0x3F
/* Host Control Registers : Host interrupt status */
#define HOST_INT_STATUS_REG 0x28
-/* Host Control Registers : Upload CRC error */
-#define UP_LD_CRC_ERR (0x1U << 2)
-/* Host Control Registers : Upload restart */
-#define UP_LD_RESTART (0x1U << 1)
-/* Host Control Registers : Download restart */
-#define DN_LD_RESTART (0x1U << 0)
-
-/* Card Control Registers : Card status register */
-#define CARD_STATUS_REG 0x30
+
/* Card Control Registers : Card I/O ready */
#define CARD_IO_READY (0x1U << 3)
-/* Card Control Registers : CIS card ready */
-#define CIS_CARD_RDY (0x1U << 2)
-/* Card Control Registers : Upload card ready */
-#define UP_LD_CARD_RDY (0x1U << 1)
/* Card Control Registers : Download card ready */
#define DN_LD_CARD_RDY (0x1U << 0)
-/* Card Control Registers : Host interrupt mask register */
-#define HOST_INTERRUPT_MASK_REG 0x34
-/* Card Control Registers : Host power interrupt mask */
-#define HOST_POWER_INT_MASK (0x1U << 3)
-/* Card Control Registers : Abort card interrupt mask */
-#define ABORT_CARD_INT_MASK (0x1U << 2)
-/* Card Control Registers : Upload card interrupt mask */
-#define UP_LD_CARD_INT_MASK (0x1U << 1)
-/* Card Control Registers : Download card interrupt mask */
-#define DN_LD_CARD_INT_MASK (0x1U << 0)
-
-/* Card Control Registers : Card interrupt status register */
-#define CARD_INTERRUPT_STATUS_REG 0x38
-/* Card Control Registers : Power up interrupt */
-#define POWER_UP_INT (0x1U << 4)
-/* Card Control Registers : Power down interrupt */
-#define POWER_DOWN_INT (0x1U << 3)
-
-/* Card Control Registers : Card interrupt RSR register */
-#define CARD_INTERRUPT_RSR_REG 0x3c
-/* Card Control Registers : Power up RSR */
-#define POWER_UP_RSR (0x1U << 4)
-/* Card Control Registers : Power down RSR */
-#define POWER_DOWN_RSR (0x1U << 3)
-
-/* Card Control Registers : Miscellaneous Configuration Register */
-#define CARD_MISC_CFG_REG 0x6C
-
-/* Host F1 read base 0 */
-#define HOST_F1_RD_BASE_0 0x0040
-/* Host F1 read base 1 */
-#define HOST_F1_RD_BASE_1 0x0041
-/* Host F1 card ready */
-#define HOST_F1_CARD_RDY 0x0020
-
-/* Firmware status 0 register */
-#define CARD_FW_STATUS0_REG 0x60
-/* Firmware status 1 register */
-#define CARD_FW_STATUS1_REG 0x61
-/* Rx length register */
-#define CARD_RX_LEN_REG 0x62
-/* Rx unit register */
-#define CARD_RX_UNIT_REG 0x63
-
/* Max retry number of CMD53 write */
#define MAX_WRITE_IOMEM_RETRY 2
@@ -192,7 +130,8 @@
if (a->mpa_tx.start_port <= port) \
a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt)); \
else \
- a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \
+ a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+ \
+ (a->max_ports - \
a->mp_end_port))); \
a->mpa_tx.pkt_cnt++; \
} while (0)
@@ -201,12 +140,6 @@
#define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \
(a->mpa_tx.pkt_cnt == a->mpa_tx.pkt_aggr_limit)
-/* SDIO Tx aggregation port limit ? */
-#define MP_TX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_wr_port < \
- a->mpa_tx.start_port) && (((MAX_PORT - \
- a->mpa_tx.start_port) + a->curr_wr_port) >= \
- SDIO_MP_AGGR_DEF_PKT_LIMIT))
-
/* Reset SDIO Tx aggregation buffer parameters */
#define MP_TX_AGGR_BUF_RESET(a) do { \
a->mpa_tx.pkt_cnt = 0; \
@@ -219,12 +152,6 @@
#define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \
(a->mpa_rx.pkt_cnt == a->mpa_rx.pkt_aggr_limit)
-/* SDIO Tx aggregation port limit ? */
-#define MP_RX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_rd_port < \
- a->mpa_rx.start_port) && (((MAX_PORT - \
- a->mpa_rx.start_port) + a->curr_rd_port) >= \
- SDIO_MP_AGGR_DEF_PKT_LIMIT))
-
/* SDIO Rx aggregation in progress ? */
#define MP_RX_AGGR_IN_PROGRESS(a) (a->mpa_rx.pkt_cnt > 0)
@@ -232,20 +159,6 @@
#define MP_RX_AGGR_BUF_HAS_ROOM(a, rx_len) \
((a->mpa_rx.buf_len+rx_len) <= a->mpa_rx.buf_size)
-/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
-#define MP_RX_AGGR_SETUP(a, skb, port) do { \
- a->mpa_rx.buf_len += skb->len; \
- if (!a->mpa_rx.pkt_cnt) \
- a->mpa_rx.start_port = port; \
- if (a->mpa_rx.start_port <= port) \
- a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt)); \
- else \
- a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt+1)); \
- a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \
- a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \
- a->mpa_rx.pkt_cnt++; \
-} while (0)
-
/* Reset SDIO Rx aggregation buffer parameters */
#define MP_RX_AGGR_BUF_RESET(a) do { \
a->mpa_rx.pkt_cnt = 0; \
@@ -254,14 +167,13 @@
a->mpa_rx.start_port = 0; \
} while (0)
-
/* data structure for SDIO MPA TX */
struct mwifiex_sdio_mpa_tx {
/* multiport tx aggregation buffer pointer */
u8 *buf;
u32 buf_len;
u32 pkt_cnt;
- u16 ports;
+ u32 ports;
u16 start_port;
u8 enabled;
u32 buf_size;
@@ -272,11 +184,11 @@ struct mwifiex_sdio_mpa_rx {
u8 *buf;
u32 buf_len;
u32 pkt_cnt;
- u16 ports;
+ u32 ports;
u16 start_port;
- struct sk_buff *skb_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT];
- u32 len_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT];
+ struct sk_buff **skb_arr;
+ u32 *len_arr;
u8 enabled;
u32 buf_size;
@@ -286,15 +198,47 @@ struct mwifiex_sdio_mpa_rx {
int mwifiex_bus_register(void);
void mwifiex_bus_unregister(void);
+struct mwifiex_sdio_card_reg {
+ u8 start_rd_port;
+ u8 start_wr_port;
+ u8 base_0_reg;
+ u8 base_1_reg;
+ u8 poll_reg;
+ u8 host_int_enable;
+ u8 status_reg_0;
+ u8 status_reg_1;
+ u8 sdio_int_mask;
+ u32 data_port_mask;
+ u8 max_mp_regs;
+ u8 rd_bitmap_l;
+ u8 rd_bitmap_u;
+ u8 rd_bitmap_1l;
+ u8 rd_bitmap_1u;
+ u8 wr_bitmap_l;
+ u8 wr_bitmap_u;
+ u8 wr_bitmap_1l;
+ u8 wr_bitmap_1u;
+ u8 rd_len_p0_l;
+ u8 rd_len_p0_u;
+ u8 card_misc_cfg_reg;
+};
+
struct sdio_mmc_card {
struct sdio_func *func;
struct mwifiex_adapter *adapter;
- u16 mp_rd_bitmap;
- u16 mp_wr_bitmap;
+ const char *firmware;
+ const struct mwifiex_sdio_card_reg *reg;
+ u8 max_ports;
+ u8 mp_agg_pkt_limit;
+ bool supports_sdio_new_mode;
+ bool has_control_mask;
+
+ u32 mp_rd_bitmap;
+ u32 mp_wr_bitmap;
u16 mp_end_port;
- u16 mp_data_port_mask;
+ u32 mp_data_port_mask;
u8 curr_rd_port;
u8 curr_wr_port;
@@ -305,6 +249,98 @@ struct sdio_mmc_card {
struct mwifiex_sdio_mpa_rx mpa_rx;
};
+struct mwifiex_sdio_device {
+ const char *firmware;
+ const struct mwifiex_sdio_card_reg *reg;
+ u8 max_ports;
+ u8 mp_agg_pkt_limit;
+ bool supports_sdio_new_mode;
+ bool has_control_mask;
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
+ .start_rd_port = 1,
+ .start_wr_port = 1,
+ .base_0_reg = 0x0040,
+ .base_1_reg = 0x0041,
+ .poll_reg = 0x30,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK,
+ .status_reg_0 = 0x60,
+ .status_reg_1 = 0x61,
+ .sdio_int_mask = 0x3f,
+ .data_port_mask = 0x0000fffe,
+ .max_mp_regs = 64,
+ .rd_bitmap_l = 0x04,
+ .rd_bitmap_u = 0x05,
+ .wr_bitmap_l = 0x06,
+ .wr_bitmap_u = 0x07,
+ .rd_len_p0_l = 0x08,
+ .rd_len_p0_u = 0x09,
+ .card_misc_cfg_reg = 0x6c,
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0x60,
+ .base_1_reg = 0x61,
+ .poll_reg = 0x50,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .status_reg_0 = 0xc0,
+ .status_reg_1 = 0xc1,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .max_mp_regs = 184,
+ .rd_bitmap_l = 0x04,
+ .rd_bitmap_u = 0x05,
+ .rd_bitmap_1l = 0x06,
+ .rd_bitmap_1u = 0x07,
+ .wr_bitmap_l = 0x08,
+ .wr_bitmap_u = 0x09,
+ .wr_bitmap_1l = 0x0a,
+ .wr_bitmap_1u = 0x0b,
+ .rd_len_p0_l = 0x0c,
+ .rd_len_p0_u = 0x0d,
+ .card_misc_cfg_reg = 0xcc,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
+ .firmware = SD8786_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
+ .firmware = SD8787_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
+ .firmware = SD8797_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
+ .firmware = SD8897_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8897,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+};
+
/*
* .cmdrsp_complete handler
*/
@@ -325,4 +361,77 @@ static inline int mwifiex_sdio_event_complete(struct mwifiex_adapter *adapter,
return 0;
}
+static inline bool
+mp_rx_aggr_port_limit_reached(struct sdio_mmc_card *card)
+{
+ u8 tmp;
+
+ if (card->curr_rd_port < card->mpa_rx.start_port) {
+ if (card->supports_sdio_new_mode)
+ tmp = card->mp_end_port >> 1;
+ else
+ tmp = card->mp_agg_pkt_limit;
+
+ if (((card->max_ports - card->mpa_rx.start_port) +
+ card->curr_rd_port) >= tmp)
+ return true;
+ }
+
+ if (!card->supports_sdio_new_mode)
+ return false;
+
+ if ((card->curr_rd_port - card->mpa_rx.start_port) >=
+ (card->mp_end_port >> 1))
+ return true;
+
+ return false;
+}
+
+static inline bool
+mp_tx_aggr_port_limit_reached(struct sdio_mmc_card *card)
+{
+ u16 tmp;
+
+ if (card->curr_wr_port < card->mpa_tx.start_port) {
+ if (card->supports_sdio_new_mode)
+ tmp = card->mp_end_port >> 1;
+ else
+ tmp = card->mp_agg_pkt_limit;
+
+ if (((card->max_ports - card->mpa_tx.start_port) +
+ card->curr_wr_port) >= tmp)
+ return true;
+ }
+
+ if (!card->supports_sdio_new_mode)
+ return false;
+
+ if ((card->curr_wr_port - card->mpa_tx.start_port) >=
+ (card->mp_end_port >> 1))
+ return true;
+
+ return false;
+}
+
+/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
+static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
+ struct sk_buff *skb, u8 port)
+{
+ card->mpa_rx.buf_len += skb->len;
+
+ if (!card->mpa_rx.pkt_cnt)
+ card->mpa_rx.start_port = port;
+
+ if (card->supports_sdio_new_mode) {
+ card->mpa_rx.ports |= (1 << port);
+ } else {
+ if (card->mpa_rx.start_port <= port)
+ card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt);
+ else
+ card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1);
+ }
+ card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb;
+ card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len;
+ card->mpa_rx.pkt_cnt++;
+}
#endif /* _MWIFIEX_SDIO_H */
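The sdio.h changes above replace the fixed SD87xx register map with per-chipset mwifiex_sdio_card_reg and mwifiex_sdio_device tables, which mwifiex_sdio_probe() binds through id->driver_data. The same table-driven lookup, reduced to a runnable user-space sketch with invented device IDs, register offsets and firmware names:

/* Minimal model of the id->driver_data binding done at probe time;
 * every value below is invented for illustration only. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct card_reg { uint8_t rd_bitmap_l, wr_bitmap_l; uint32_t data_port_mask; };

struct sdio_device_desc {
        const char *firmware;
        const struct card_reg *reg;
        uint8_t max_ports;
        bool new_mode;
};

static const struct card_reg reg_old = { 0x04, 0x06, 0x0000fffe };
static const struct card_reg reg_new = { 0x04, 0x08, 0xffffffff };

static const struct sdio_device_desc sd_old = { "old_fw.bin", &reg_old, 16, false };
static const struct sdio_device_desc sd_new = { "new_fw.bin", &reg_new, 32, true };

struct id_entry { uint16_t device; const struct sdio_device_desc *driver_data; };

static const struct id_entry ids[] = {
        { 0x9129, &sd_old },
        { 0x912d, &sd_new },
        { 0 },
};

int main(void)
{
        uint16_t probed = 0x912d;                        /* pretend probe */

        for (const struct id_entry *e = ids; e->device; e++) {
                if (e->device != probed)
                        continue;
                const struct sdio_device_desc *d = e->driver_data;
                printf("fw=%s ports=%u new_mode=%d mask=0x%08x\n",
                       d->firmware, (unsigned)d->max_ports, d->new_mode,
                       (unsigned)d->reg->data_port_mask);
        }
        return 0;
}

The design point is the same as in the patch: code paths consult card->reg and the per-card limits instead of compile-time constants, so adding a chipset means adding a table entry rather than more #ifdefs or switch cases.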
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index b193e25977d2..8ece48580642 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1134,6 +1134,55 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
return 0;
}
+/* This function parses cal data from ASCII hex text into binary format */
+static u32 mwifiex_parse_cal_cfg(u8 *src, size_t len, u8 *dst)
+{
+ u8 *s = src, *d = dst;
+
+ while (s - src < len) {
+ if (*s && (isspace(*s) || *s == '\t')) {
+ s++;
+ continue;
+ }
+ if (isxdigit(*s)) {
+ *d++ = simple_strtol(s, NULL, 16);
+ s += 2;
+ } else {
+ s++;
+ }
+ }
+
+ return d - dst;
+}
+
+/* This function prepares command of set_cfg_data. */
+static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action)
+{
+ struct host_cmd_ds_802_11_cfg_data *cfg_data = &cmd->params.cfg_data;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u32 len, cal_data_offset;
+ u8 *tmp_cmd = (u8 *)cmd;
+
+ cal_data_offset = S_DS_GEN + sizeof(*cfg_data);
+ if ((adapter->cal_data->data) && (adapter->cal_data->size > 0))
+ len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
+ adapter->cal_data->size,
+ (u8 *)(tmp_cmd + cal_data_offset));
+ else
+ return -1;
+
+ cfg_data->action = cpu_to_le16(cmd_action);
+ cfg_data->type = cpu_to_le16(CFG_DATA_TYPE_CAL);
+ cfg_data->data_len = cpu_to_le16(len);
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA);
+ cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*cfg_data) + len);
+
+ return 0;
+}
+
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1152,6 +1201,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_GET_HW_SPEC:
ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr);
break;
+ case HostCmd_CMD_CFG_DATA:
+ ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, cmd_action);
+ break;
case HostCmd_CMD_MAC_CONTROL:
ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action,
data_buf);
@@ -1384,6 +1436,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
*/
int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret;
u16 enable = true;
struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl;
@@ -1404,6 +1457,15 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
HostCmd_ACT_GEN_SET, 0, NULL);
if (ret)
return -1;
+
+ /* Download calibration data to firmware */
+ if (adapter->cal_data) {
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
+ HostCmd_ACT_GEN_SET, 0, NULL);
+ if (ret)
+ return -1;
+ }
+
/* Read MAC address from HW */
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
HostCmd_ACT_GEN_GET, 0, NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 9f990e14966e..d85df158cc6c 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -818,6 +818,18 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
return 0;
}
+/* This function handles the command response of set_cfg_data */
+static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ if (resp->result != HostCmd_RESULT_OK) {
+ dev_err(priv->adapter->dev, "Cal data cmd resp failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* This function handles the command responses.
*
@@ -841,6 +853,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_GET_HW_SPEC:
ret = mwifiex_ret_get_hw_spec(priv, resp);
break;
+ case HostCmd_CMD_CFG_DATA:
+ ret = mwifiex_ret_cfg_data(priv, resp);
+ break;
case HostCmd_CMD_MAC_CONTROL:
break;
case HostCmd_CMD_802_11_MAC_ADDRESS:
@@ -978,6 +993,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_UAP_BSS_STOP:
priv->bss_started = 0;
break;
+ case HostCmd_CMD_UAP_STA_DEAUTH:
+ break;
case HostCmd_CMD_MEF_CFG:
break;
default:
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 41aafc7454ed..ea265ec0e522 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -427,6 +427,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
+ case EVENT_CHANNEL_SWITCH_ANN:
+ dev_dbg(adapter->dev, "event: Channel Switch Announcement\n");
+ priv->csa_expire_time =
+ jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
+ priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
+ ret = mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_802_11_DEAUTHENTICATE,
+ HostCmd_ACT_GEN_SET, 0,
+ priv->curr_bss_params.bss_descriptor.mac_address);
+ break;
+
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 1a8a19dbd635..8af97abf7108 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -104,16 +104,14 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
} else {
priv->curr_pkt_filter &=
~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
- if (mcast_list->num_multicast_addr) {
- dev_dbg(priv->adapter->dev,
- "info: Set multicast list=%d\n",
- mcast_list->num_multicast_addr);
- /* Send multicast addresses to firmware */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_MAC_MULTICAST_ADR,
- HostCmd_ACT_GEN_SET, 0,
- mcast_list);
- }
+ dev_dbg(priv->adapter->dev,
+ "info: Set multicast list=%d\n",
+ mcast_list->num_multicast_addr);
+ /* Send multicast addresses to firmware */
+ ret = mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_MAC_MULTICAST_ADR,
+ HostCmd_ACT_GEN_SET, 0,
+ mcast_list);
}
}
dev_dbg(priv->adapter->dev,
@@ -180,6 +178,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
*/
bss_desc->disable_11ac = true;
+ if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT)
+ bss_desc->sensed_11h = true;
+
return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
}
@@ -256,31 +257,38 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
goto done;
}
- if (priv->bss_mode == NL80211_IFTYPE_STATION) {
- /* Infra mode */
+ if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
+ u8 config_bands;
+
ret = mwifiex_deauthenticate(priv, NULL);
if (ret)
goto done;
- if (bss_desc) {
- u8 config_bands = 0;
+ if (!bss_desc)
+ return -1;
- if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band)
- == HostCmd_SCAN_RADIO_TYPE_BG)
- config_bands = BAND_B | BAND_G | BAND_GN |
- BAND_GAC;
- else
- config_bands = BAND_A | BAND_AN | BAND_AAC;
+ if (mwifiex_band_to_radio_type(bss_desc->bss_band) ==
+ HostCmd_SCAN_RADIO_TYPE_BG)
+ config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC;
+ else
+ config_bands = BAND_A | BAND_AN | BAND_AAC;
- if (!((config_bands | adapter->fw_bands) &
- ~adapter->fw_bands))
- adapter->config_bands = config_bands;
- }
+ if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands))
+ adapter->config_bands = config_bands;
ret = mwifiex_check_network_compatibility(priv, bss_desc);
if (ret)
goto done;
+ if (mwifiex_11h_get_csa_closed_channel(priv) ==
+ (u8)bss_desc->channel) {
+ dev_err(adapter->dev,
+ "Attempt to reconnect on csa closed chan(%d)\n",
+ bss_desc->channel);
+ goto done;
+ }
+
dev_dbg(adapter->dev, "info: SSID found in scan list ... "
"associating...\n");
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index b04b1db29100..2de882dead0f 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -689,6 +689,23 @@ mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
return 0;
}
+/* This function prepares the AP specific deauth command with the MAC address
+ * supplied as a function parameter.
+ */
+static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd, u8 *mac)
+{
+ struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH);
+ memcpy(sta_deauth->mac, mac, ETH_ALEN);
+ sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
+
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) +
+ S_DS_GEN);
+ return 0;
+}
+
/* This function prepares the AP specific commands before sending them
* to the firmware.
* This is a generic function which calls specific command preparation
@@ -710,6 +727,10 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
cmd->command = cpu_to_le16(cmd_no);
cmd->size = cpu_to_le16(S_DS_GEN);
break;
+ case HostCmd_CMD_UAP_STA_DEAUTH:
+ if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
+ return -1;
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 21c640d3b579..718066577c6c 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -107,18 +107,15 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
*/
static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
{
- struct mwifiex_sta_node *node, *tmp;
+ struct mwifiex_sta_node *node;
unsigned long flags;
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
node = mwifiex_get_sta_entry(priv, mac);
if (node) {
- list_for_each_entry_safe(node, tmp, &priv->sta_list,
- list) {
- list_del(&node->list);
- kfree(node);
- }
+ list_del(&node->list);
+ kfree(node);
}
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
@@ -295,3 +292,19 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
return 0;
}
+
+/* This function deletes the station entry from the associated station list.
+ * If both AP and STA are 11n enabled, the RxReorder tables and TxBA stream
+ * tables created for this station are also deleted.
+ */
+void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
+ struct mwifiex_sta_node *node)
+{
+ if (priv->ap_11n_enabled && node->is_11n_enabled) {
+ mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, node->mac_addr);
+ mwifiex_del_tx_ba_stream_tbl_by_ra(priv, node->mac_addr);
+ }
+ mwifiex_del_sta_entry(priv, node->mac_addr);
+
+ return;
+}
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 4be3d33ceae8..944e8846f6fc 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -37,6 +37,9 @@
/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5
+static bool enable_tx_amsdu;
+module_param(enable_tx_amsdu, bool, 0644);
+
/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
0x00, 0x50, 0xf2, 0x02,
@@ -1233,7 +1236,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
mwifiex_send_delba(priv, tid_del, ra, 1);
}
}
- if (mwifiex_is_amsdu_allowed(priv, tid) &&
+ if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 6820fce4016b..a3707fd4ef62 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1548,7 +1548,7 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
if (!priv->pending_tx_pkts)
return 0;
- retry = 0;
+ retry = 1;
rc = 0;
spin_lock_bh(&priv->tx_lock);
@@ -1572,13 +1572,19 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
spin_lock_bh(&priv->tx_lock);
- if (timeout) {
+ if (timeout || !priv->pending_tx_pkts) {
WARN_ON(priv->pending_tx_pkts);
if (retry)
wiphy_notice(hw->wiphy, "tx rings drained\n");
break;
}
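+ /* Kick the Tx path once, then resume polling for the rings to drain */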
+ if (retry) {
+ mwl8k_tx_start(priv);
+ retry = 0;
+ continue;
+ }
+
if (priv->pending_tx_pkts < oldcount) {
wiphy_notice(hw->wiphy,
"waiting for tx rings to drain (%d -> %d pkts)\n",
@@ -2055,6 +2061,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
mwl8k_remove_stream(hw, stream);
spin_unlock(&priv->stream_lock);
}
+ mwl8k_tx_start(priv);
spin_unlock_bh(&priv->tx_lock);
pci_unmap_single(priv->pdev, dma, skb->len,
PCI_DMA_TODEVICE);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.h b/drivers/net/wireless/orinoco/orinoco_pci.h
index ea7231af40a8..43f5b9f5a0b0 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.h
+++ b/drivers/net/wireless/orinoco/orinoco_pci.h
@@ -38,7 +38,7 @@ static int orinoco_pci_resume(struct pci_dev *pdev)
struct net_device *dev = priv->ndev;
int err;
- pci_set_power_state(pdev, 0);
+ pci_set_power_state(pdev, PCI_D0);
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 1f9cb55c3360..bdfe637953f4 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -881,7 +881,8 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
if (!upriv->udev) {
dbg("Device disconnected");
- return -ENODEV;
+ retval = -ENODEV;
+ goto exit;
}
if (upriv->read_urb->status != -EINPROGRESS)
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 978e7eb26567..7fc46f26cf2b 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -42,8 +42,7 @@
MODULE_FIRMWARE("3826.arm");
-/*
- * gpios should be handled in board files and provided via platform data,
+/* gpios should be handled in board files and provided via platform data,
* but because it's currently impossible for p54spi to have a header file
* in include/linux, let's use module paramaters for now
*/
@@ -191,8 +190,7 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev)
const struct firmware *eeprom;
int ret;
- /*
- * allow users to customize their eeprom.
+ /* allow users to customize their eeprom.
*/
ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev);
@@ -285,8 +283,7 @@ static void p54spi_power_on(struct p54s_priv *priv)
gpio_set_value(p54spi_gpio_power, 1);
enable_irq(gpio_to_irq(p54spi_gpio_irq));
- /*
- * need to wait a while before device can be accessed, the length
+ /* need to wait a while before device can be accessed, the length
* is just a guess
*/
msleep(10);
@@ -365,7 +362,8 @@ static int p54spi_rx(struct p54s_priv *priv)
/* Firmware may insert up to 4 padding bytes after the lmac header,
* but it does not amend the size of SPI data transfer.
* Such packets has correct data size in header, thus referencing
- * past the end of allocated skb. Reserve extra 4 bytes for this case */
+ * past the end of allocated skb. Reserve extra 4 bytes for this case
+ */
skb = dev_alloc_skb(len + 4);
if (!skb) {
p54spi_sleep(priv);
@@ -383,7 +381,8 @@ static int p54spi_rx(struct p54s_priv *priv)
}
p54spi_sleep(priv);
/* Put additional bytes to compensate for the possible
- * alignment-caused truncation */
+ * alignment-caused truncation
+ */
skb_put(skb, 4);
if (p54_rx(priv->hw, skb) == 0)
@@ -713,27 +712,7 @@ static struct spi_driver p54spi_driver = {
.remove = p54spi_remove,
};
-static int __init p54spi_init(void)
-{
- int ret;
-
- ret = spi_register_driver(&p54spi_driver);
- if (ret < 0) {
- printk(KERN_ERR "failed to register SPI driver: %d", ret);
- goto out;
- }
-
-out:
- return ret;
-}
-
-static void __exit p54spi_exit(void)
-{
- spi_unregister_driver(&p54spi_driver);
-}
-
-module_init(p54spi_init);
-module_exit(p54spi_exit);
+module_spi_driver(p54spi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 9b915d3a44be..3e60a31582f8 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,6 +1,6 @@
menuconfig RT2X00
tristate "Ralink driver support"
- depends on MAC80211
+ depends on MAC80211 && HAS_DMA
---help---
This will enable the support for the Ralink drivers,
developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index f7143733d7e9..3d53a09da5a1 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1767,33 +1767,45 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
.config = rt2400pci_config,
};
-static const struct data_queue_desc rt2400pci_queue_rx = {
- .entry_num = 24,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = RXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
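+/* Per-queue setup replacing the former static data_queue_desc tables */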
+static void rt2400pci_queue_init(struct data_queue *queue)
+{
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 24;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = RXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt2400pci_queue_tx = {
- .entry_num = 24,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 24;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt2400pci_queue_bcn = {
- .entry_num = 1,
- .data_size = MGMT_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_BEACON:
+ queue->limit = 1;
+ queue->data_size = MGMT_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt2400pci_queue_atim = {
- .entry_num = 8,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_ATIM:
+ queue->limit = 8;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+}
static const struct rt2x00_ops rt2400pci_ops = {
.name = KBUILD_MODNAME,
@@ -1801,11 +1813,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .extra_tx_headroom = 0,
- .rx = &rt2400pci_queue_rx,
- .tx = &rt2400pci_queue_tx,
- .bcn = &rt2400pci_queue_bcn,
- .atim = &rt2400pci_queue_atim,
+ .queue_init = rt2400pci_queue_init,
.lib = &rt2400pci_rt2x00_ops,
.hw = &rt2400pci_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 77e45b223d15..0ac5c589ddce 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -2056,33 +2056,45 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
.config = rt2500pci_config,
};
-static const struct data_queue_desc rt2500pci_queue_rx = {
- .entry_num = 32,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = RXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+static void rt2500pci_queue_init(struct data_queue *queue)
+{
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 32;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = RXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt2500pci_queue_tx = {
- .entry_num = 32,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 32;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt2500pci_queue_bcn = {
- .entry_num = 1,
- .data_size = MGMT_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_BEACON:
+ queue->limit = 1;
+ queue->data_size = MGMT_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt2500pci_queue_atim = {
- .entry_num = 8,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_ATIM:
+ queue->limit = 8;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+}
static const struct rt2x00_ops rt2500pci_ops = {
.name = KBUILD_MODNAME,
@@ -2090,11 +2102,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .extra_tx_headroom = 0,
- .rx = &rt2500pci_queue_rx,
- .tx = &rt2500pci_queue_tx,
- .bcn = &rt2500pci_queue_bcn,
- .atim = &rt2500pci_queue_atim,
+ .queue_init = rt2500pci_queue_init,
.lib = &rt2500pci_rt2x00_ops,
.hw = &rt2500pci_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index a7f7b365eff4..85acc79f68b8 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1867,33 +1867,45 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
.config = rt2500usb_config,
};
-static const struct data_queue_desc rt2500usb_queue_rx = {
- .entry_num = 32,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = RXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+static void rt2500usb_queue_init(struct data_queue *queue)
+{
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 32;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = RXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb);
+ break;
-static const struct data_queue_desc rt2500usb_queue_tx = {
- .entry_num = 32,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 32;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb);
+ break;
-static const struct data_queue_desc rt2500usb_queue_bcn = {
- .entry_num = 1,
- .data_size = MGMT_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb_bcn),
-};
+ case QID_BEACON:
+ queue->limit = 1;
+ queue->data_size = MGMT_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb_bcn);
+ break;
-static const struct data_queue_desc rt2500usb_queue_atim = {
- .entry_num = 8,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+ case QID_ATIM:
+ queue->limit = 8;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb);
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+}
static const struct rt2x00_ops rt2500usb_ops = {
.name = KBUILD_MODNAME,
@@ -1901,11 +1913,7 @@ static const struct rt2x00_ops rt2500usb_ops = {
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .extra_tx_headroom = TXD_DESC_SIZE,
- .rx = &rt2500usb_queue_rx,
- .tx = &rt2500usb_queue_tx,
- .bcn = &rt2500usb_queue_bcn,
- .atim = &rt2500usb_queue_atim,
+ .queue_init = rt2500usb_queue_init,
.lib = &rt2500usb_rt2x00_ops,
.hw = &rt2500usb_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index a7630d5ec892..d78c495a86a0 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -100,7 +100,7 @@
#define CSR_REG_BASE 0x1000
#define CSR_REG_SIZE 0x0800
#define EEPROM_BASE 0x0000
-#define EEPROM_SIZE 0x0110
+#define EEPROM_SIZE 0x0200
#define BBP_BASE 0x0000
#define BBP_SIZE 0x00ff
#define RF_BASE 0x0004
@@ -2625,11 +2625,13 @@ struct mac_iveiv_entry {
/*
* DMA descriptor defines.
*/
-#define TXWI_DESC_SIZE (4 * sizeof(__le32))
-#define RXWI_DESC_SIZE (4 * sizeof(__le32))
-#define TXWI_DESC_SIZE_5592 (5 * sizeof(__le32))
-#define RXWI_DESC_SIZE_5592 (6 * sizeof(__le32))
+#define TXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32))
+#define TXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32))
+
+#define RXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32))
+#define RXWI_DESC_SIZE_6WORDS (6 * sizeof(__le32))
+
/*
* TX WI structure
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 72f32e5caa4d..1f80ea5e29dd 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -840,7 +840,7 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
unsigned int beacon_base)
{
int i;
- const int txwi_desc_size = rt2x00dev->ops->bcn->winfo_size;
+ const int txwi_desc_size = rt2x00dev->bcn->winfo_size;
/*
* For the Beacon base registers we only need to clear
@@ -2392,7 +2392,7 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
- if (info->default_power1 > power_bound)
+ if (info->default_power2 > power_bound)
rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
else
rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2);
@@ -2678,30 +2678,53 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
tx_pin = 0;
- /* Turn on unused PA or LNA when not using 1T or 1R */
- if (rt2x00dev->default_ant.tx_chain_num == 2) {
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 3:
+ /* Turn on tertiary PAs */
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN,
+ rf->channel > 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN,
+ rf->channel <= 14);
+ /* fall-through */
+ case 2:
+ /* Turn on secondary PAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN,
rf->channel > 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN,
rf->channel <= 14);
+ /* fall-through */
+ case 1:
+ /* Turn on primary PAs */
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
+ rf->channel > 14);
+ if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
+ else
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
+ rf->channel <= 14);
+ break;
}
- /* Turn on unused PA or LNA when not using 1T or 1R */
- if (rt2x00dev->default_ant.rx_chain_num == 2) {
+ switch (rt2x00dev->default_ant.rx_chain_num) {
+ case 3:
+ /* Turn on tertiary LNAs */
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
+ /* fall-through */
+ case 2:
+ /* Turn on secondary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
+ /* fall-through */
+ case 1:
+ /* Turn on primary LNAs */
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
+ break;
}
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
- else
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
- rf->channel <= 14);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
@@ -3960,379 +3983,577 @@ static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 106, 0x35);
}
-static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
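+/* Disable the unused DAC (Tx) and ADC (Rx) in BBP register 138 when the
+ * EEPROM reports a single Tx or Rx chain.
+ */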
+static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev)
{
- int ant, div_mode;
u16 eeprom;
u8 value;
- rt2800_init_bbp_early(rt2x00dev);
+ rt2800_bbp_read(rt2x00dev, 138, &value);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
+ value |= 0x20;
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
+ value &= ~0x02;
+ rt2800_bbp_write(rt2x00dev, 138, value);
+}
- rt2800_bbp_read(rt2x00dev, 105, &value);
- rt2x00_set_field8(&value, BBP105_MLD,
- rt2x00dev->default_ant.rx_chain_num == 2);
- rt2800_bbp_write(rt2x00dev, 105, value);
+static void rt2800_init_bbp_305x_soc(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
+ rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+ rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
+
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+ rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+ rt2800_bbp_write(rt2x00dev, 80, 0x08);
+
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+
+ rt2800_bbp_write(rt2x00dev, 105, 0x01);
+
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+}
+
+static void rt2800_init_bbp_28xx(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+ rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
+ rt2800_bbp_write(rt2x00dev, 69, 0x16);
+ rt2800_bbp_write(rt2x00dev, 73, 0x12);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+ rt2800_bbp_write(rt2x00dev, 81, 0x37);
+
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
+ rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ else
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 103, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+}
+
+static void rt2800_init_bbp_30xx(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+ rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
+
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+ rt2800_bbp_write(rt2x00dev, 79, 0x13);
+ rt2800_bbp_write(rt2x00dev, 80, 0x05);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
+
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E))
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+ else
+ rt2800_bbp_write(rt2x00dev, 103, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090))
+ rt2800_disable_unused_dac_adc(rt2x00dev);
+}
+
+static void rt2800_init_bbp_3290(struct rt2x00_dev *rt2x00dev)
+{
+ u8 value;
rt2800_bbp4_mac_if_ctrl(rt2x00dev);
- rt2800_bbp_write(rt2x00dev, 20, 0x06);
rt2800_bbp_write(rt2x00dev, 31, 0x08);
- rt2800_bbp_write(rt2x00dev, 65, 0x2C);
- rt2800_bbp_write(rt2x00dev, 68, 0xDD);
- rt2800_bbp_write(rt2x00dev, 69, 0x1A);
- rt2800_bbp_write(rt2x00dev, 70, 0x05);
+
+ rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+ rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+ rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
- rt2800_bbp_write(rt2x00dev, 74, 0x0F);
- rt2800_bbp_write(rt2x00dev, 75, 0x4F);
+ rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
+
+ rt2800_bbp_write(rt2x00dev, 77, 0x58);
+
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+ rt2800_bbp_write(rt2x00dev, 74, 0x0b);
+ rt2800_bbp_write(rt2x00dev, 79, 0x18);
+ rt2800_bbp_write(rt2x00dev, 80, 0x09);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
+
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+ rt2800_bbp_write(rt2x00dev, 83, 0x7a);
+
+ rt2800_bbp_write(rt2x00dev, 84, 0x9a);
+
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+
+ rt2800_bbp_write(rt2x00dev, 105, 0x1c);
+
+ rt2800_bbp_write(rt2x00dev, 106, 0x03);
+
+ rt2800_bbp_write(rt2x00dev, 128, 0x12);
+
+ rt2800_bbp_write(rt2x00dev, 67, 0x24);
+ rt2800_bbp_write(rt2x00dev, 143, 0x04);
+ rt2800_bbp_write(rt2x00dev, 142, 0x99);
+ rt2800_bbp_write(rt2x00dev, 150, 0x30);
+ rt2800_bbp_write(rt2x00dev, 151, 0x2e);
+ rt2800_bbp_write(rt2x00dev, 152, 0x20);
+ rt2800_bbp_write(rt2x00dev, 153, 0x34);
+ rt2800_bbp_write(rt2x00dev, 154, 0x40);
+ rt2800_bbp_write(rt2x00dev, 155, 0x3b);
+ rt2800_bbp_write(rt2x00dev, 253, 0x04);
+
+ rt2800_bbp_read(rt2x00dev, 47, &value);
+ rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
+ rt2800_bbp_write(rt2x00dev, 47, value);
+
+ /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
+ rt2800_bbp_read(rt2x00dev, 3, &value);
+ rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
+ rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
+ rt2800_bbp_write(rt2x00dev, 3, value);
+}
+
+static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 3, 0x00);
+ rt2800_bbp_write(rt2x00dev, 4, 0x50);
+
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
+ rt2800_bbp_write(rt2x00dev, 47, 0x48);
+
+ rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+ rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+ rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x13);
+ rt2800_bbp_write(rt2x00dev, 75, 0x46);
+ rt2800_bbp_write(rt2x00dev, 76, 0x28);
+
rt2800_bbp_write(rt2x00dev, 77, 0x59);
- rt2800_bbp_write(rt2x00dev, 84, 0x9A);
+
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+ rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+ rt2800_bbp_write(rt2x00dev, 80, 0x08);
+ rt2800_bbp_write(rt2x00dev, 81, 0x37);
+
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
rt2800_bbp_write(rt2x00dev, 86, 0x38);
+
rt2800_bbp_write(rt2x00dev, 88, 0x90);
+
rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
rt2800_bbp_write(rt2x00dev, 92, 0x02);
- rt2800_bbp_write(rt2x00dev, 95, 0x9a);
- rt2800_bbp_write(rt2x00dev, 98, 0x12);
- rt2800_bbp_write(rt2x00dev, 103, 0xC0);
+
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+
rt2800_bbp_write(rt2x00dev, 104, 0x92);
- /* FIXME BBP105 overwrite */
- rt2800_bbp_write(rt2x00dev, 105, 0x3C);
- rt2800_bbp_write(rt2x00dev, 106, 0x35);
- rt2800_bbp_write(rt2x00dev, 128, 0x12);
- rt2800_bbp_write(rt2x00dev, 134, 0xD0);
- rt2800_bbp_write(rt2x00dev, 135, 0xF6);
- rt2800_bbp_write(rt2x00dev, 137, 0x0F);
- /* Initialize GLRT (Generalized Likelihood Radio Test) */
- rt2800_init_bbp_5592_glrt(rt2x00dev);
+ rt2800_bbp_write(rt2x00dev, 105, 0x34);
+
+ rt2800_bbp_write(rt2x00dev, 106, 0x05);
+
+ rt2800_bbp_write(rt2x00dev, 120, 0x50);
+
+ rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
+ rt2800_bbp_write(rt2x00dev, 163, 0xbd);
+ /* Set ITxBF timeout to 0x9c40=1000msec */
+ rt2800_bbp_write(rt2x00dev, 179, 0x02);
+ rt2800_bbp_write(rt2x00dev, 180, 0x00);
+ rt2800_bbp_write(rt2x00dev, 182, 0x40);
+ rt2800_bbp_write(rt2x00dev, 180, 0x01);
+ rt2800_bbp_write(rt2x00dev, 182, 0x9c);
+ rt2800_bbp_write(rt2x00dev, 179, 0x00);
+ /* Reprogram the inband interface to put right values in RXWI */
+ rt2800_bbp_write(rt2x00dev, 142, 0x04);
+ rt2800_bbp_write(rt2x00dev, 143, 0x3b);
+ rt2800_bbp_write(rt2x00dev, 142, 0x06);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa0);
+ rt2800_bbp_write(rt2x00dev, 142, 0x07);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa1);
+ rt2800_bbp_write(rt2x00dev, 142, 0x08);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa2);
+
+ rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+}
- rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+ rt2800_bbp_write(rt2x00dev, 66, 0x38);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
- div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
- ant = (div_mode == 3) ? 1 : 0;
- rt2800_bbp_read(rt2x00dev, 152, &value);
- if (ant == 0) {
- /* Main antenna */
- rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
- } else {
- /* Auxiliary antenna */
- rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
- }
- rt2800_bbp_write(rt2x00dev, 152, value);
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
- rt2800_bbp_read(rt2x00dev, 254, &value);
- rt2x00_set_field8(&value, BBP254_BIT7, 1);
- rt2800_bbp_write(rt2x00dev, 254, value);
- }
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- rt2800_init_freq_calibration(rt2x00dev);
+ rt2800_bbp_write(rt2x00dev, 79, 0x13);
+ rt2800_bbp_write(rt2x00dev, 80, 0x05);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
- rt2800_bbp_write(rt2x00dev, 84, 0x19);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E))
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+ else
+ rt2800_bbp_write(rt2x00dev, 103, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+ rt2800_disable_unused_dac_adc(rt2x00dev);
}
-static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev)
{
- unsigned int i;
- u16 eeprom;
- u8 reg_id;
- u8 value;
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
- if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
- rt2800_wait_bbp_ready(rt2x00dev)))
- return -EACCES;
+ rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+ rt2800_bbp_write(rt2x00dev, 66, 0x38);
- if (rt2x00_rt(rt2x00dev, RT5592)) {
- rt2800_init_bbp_5592(rt2x00dev);
- return 0;
- }
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
- if (rt2x00_rt(rt2x00dev, RT3352)) {
- rt2800_bbp_write(rt2x00dev, 3, 0x00);
- rt2800_bbp_write(rt2x00dev, 4, 0x50);
- }
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+ rt2800_bbp_write(rt2x00dev, 79, 0x13);
+ rt2800_bbp_write(rt2x00dev, 80, 0x05);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
- if (rt2800_is_305x_soc(rt2x00dev) ||
- rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT3352) ||
- rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp_write(rt2x00dev, 31, 0x08);
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
- if (rt2x00_rt(rt2x00dev, RT3352))
- rt2800_bbp_write(rt2x00dev, 47, 0x48);
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+
+ rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+ rt2800_disable_unused_dac_adc(rt2x00dev);
+}
+
+static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
+{
+ int ant, div_mode;
+ u16 eeprom;
+ u8 value;
+
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
- if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT3352) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+ rt2800_bbp_write(rt2x00dev, 68, 0x0b);
- if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
- rt2800_bbp_write(rt2x00dev, 69, 0x16);
- rt2800_bbp_write(rt2x00dev, 73, 0x12);
- } else if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT3352) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
- rt2800_bbp_write(rt2x00dev, 69, 0x12);
- rt2800_bbp_write(rt2x00dev, 73, 0x13);
- rt2800_bbp_write(rt2x00dev, 75, 0x46);
- rt2800_bbp_write(rt2x00dev, 76, 0x28);
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x13);
+ rt2800_bbp_write(rt2x00dev, 75, 0x46);
+ rt2800_bbp_write(rt2x00dev, 76, 0x28);
- if (rt2x00_rt(rt2x00dev, RT3290))
- rt2800_bbp_write(rt2x00dev, 77, 0x58);
- else
- rt2800_bbp_write(rt2x00dev, 77, 0x59);
- } else {
- rt2800_bbp_write(rt2x00dev, 69, 0x12);
- rt2800_bbp_write(rt2x00dev, 73, 0x10);
- }
+ rt2800_bbp_write(rt2x00dev, 77, 0x59);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- if (rt2x00_rt(rt2x00dev, RT3070) ||
- rt2x00_rt(rt2x00dev, RT3071) ||
- rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390) ||
- rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
- rt2800_bbp_write(rt2x00dev, 79, 0x13);
- rt2800_bbp_write(rt2x00dev, 80, 0x05);
- rt2800_bbp_write(rt2x00dev, 81, 0x33);
- } else if (rt2800_is_305x_soc(rt2x00dev)) {
- rt2800_bbp_write(rt2x00dev, 78, 0x0e);
- rt2800_bbp_write(rt2x00dev, 80, 0x08);
- } else if (rt2x00_rt(rt2x00dev, RT3290)) {
- rt2800_bbp_write(rt2x00dev, 74, 0x0b);
- rt2800_bbp_write(rt2x00dev, 79, 0x18);
- rt2800_bbp_write(rt2x00dev, 80, 0x09);
- rt2800_bbp_write(rt2x00dev, 81, 0x33);
- } else if (rt2x00_rt(rt2x00dev, RT3352)) {
- rt2800_bbp_write(rt2x00dev, 78, 0x0e);
- rt2800_bbp_write(rt2x00dev, 80, 0x08);
- rt2800_bbp_write(rt2x00dev, 81, 0x37);
- } else {
- rt2800_bbp_write(rt2x00dev, 81, 0x37);
- }
+ rt2800_bbp_write(rt2x00dev, 79, 0x13);
+ rt2800_bbp_write(rt2x00dev, 80, 0x05);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
rt2800_bbp_write(rt2x00dev, 82, 0x62);
- if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp_write(rt2x00dev, 83, 0x7a);
- else
- rt2800_bbp_write(rt2x00dev, 83, 0x6a);
- if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
- rt2800_bbp_write(rt2x00dev, 84, 0x19);
- else if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp_write(rt2x00dev, 84, 0x9a);
- else
- rt2800_bbp_write(rt2x00dev, 84, 0x99);
+ rt2800_bbp_write(rt2x00dev, 83, 0x7a);
- if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT3352) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp_write(rt2x00dev, 86, 0x38);
- else
- rt2800_bbp_write(rt2x00dev, 86, 0x00);
+ rt2800_bbp_write(rt2x00dev, 84, 0x9a);
- if (rt2x00_rt(rt2x00dev, RT3352) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+
+ if (rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 88, 0x90);
rt2800_bbp_write(rt2x00dev, 91, 0x04);
- if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT3352) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp_write(rt2x00dev, 92, 0x02);
- else
- rt2800_bbp_write(rt2x00dev, 92, 0x00);
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 95, 0x9a);
rt2800_bbp_write(rt2x00dev, 98, 0x12);
}
- if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
- rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
- rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
- rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
- rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT3352) ||
- rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392) ||
- rt2800_is_305x_soc(rt2x00dev))
- rt2800_bbp_write(rt2x00dev, 103, 0xc0);
- else
- rt2800_bbp_write(rt2x00dev, 103, 0x00);
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
- if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT3352) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp_write(rt2x00dev, 104, 0x92);
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
- if (rt2800_is_305x_soc(rt2x00dev))
- rt2800_bbp_write(rt2x00dev, 105, 0x01);
- else if (rt2x00_rt(rt2x00dev, RT3290))
- rt2800_bbp_write(rt2x00dev, 105, 0x1c);
- else if (rt2x00_rt(rt2x00dev, RT3352))
- rt2800_bbp_write(rt2x00dev, 105, 0x34);
- else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp_write(rt2x00dev, 105, 0x3c);
- else
- rt2800_bbp_write(rt2x00dev, 105, 0x05);
+ rt2800_bbp_write(rt2x00dev, 105, 0x3c);
- if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT5390))
+ if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 106, 0x03);
- else if (rt2x00_rt(rt2x00dev, RT3352))
- rt2800_bbp_write(rt2x00dev, 106, 0x05);
else if (rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 106, 0x12);
else
- rt2800_bbp_write(rt2x00dev, 106, 0x35);
-
- if (rt2x00_rt(rt2x00dev, RT3352))
- rt2800_bbp_write(rt2x00dev, 120, 0x50);
+ WARN_ON(1);
- if (rt2x00_rt(rt2x00dev, RT3290) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
- rt2800_bbp_write(rt2x00dev, 128, 0x12);
+ rt2800_bbp_write(rt2x00dev, 128, 0x12);
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 134, 0xd0);
rt2800_bbp_write(rt2x00dev, 135, 0xf6);
}
- if (rt2x00_rt(rt2x00dev, RT3352))
- rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+ rt2800_disable_unused_dac_adc(rt2x00dev);
- if (rt2x00_rt(rt2x00dev, RT3071) ||
- rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390) ||
- rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
- rt2800_bbp_read(rt2x00dev, 138, &value);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ div_mode = rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_ANT_DIVERSITY);
+ ant = (div_mode == 3) ? 1 : 0;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
- value |= 0x20;
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
- value &= ~0x02;
+ /* check if this is a Bluetooth combo card */
+ if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ u32 reg;
- rt2800_bbp_write(rt2x00dev, 138, value);
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
+ if (ant == 0)
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
+ else if (ant == 1)
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
}
- if (rt2x00_rt(rt2x00dev, RT3290)) {
- rt2800_bbp_write(rt2x00dev, 67, 0x24);
- rt2800_bbp_write(rt2x00dev, 143, 0x04);
- rt2800_bbp_write(rt2x00dev, 142, 0x99);
- rt2800_bbp_write(rt2x00dev, 150, 0x30);
- rt2800_bbp_write(rt2x00dev, 151, 0x2e);
- rt2800_bbp_write(rt2x00dev, 152, 0x20);
- rt2800_bbp_write(rt2x00dev, 153, 0x34);
- rt2800_bbp_write(rt2x00dev, 154, 0x40);
- rt2800_bbp_write(rt2x00dev, 155, 0x3b);
- rt2800_bbp_write(rt2x00dev, 253, 0x04);
-
- rt2800_bbp_read(rt2x00dev, 47, &value);
- rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
- rt2800_bbp_write(rt2x00dev, 47, value);
-
- /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
- rt2800_bbp_read(rt2x00dev, 3, &value);
- rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
- rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
- rt2800_bbp_write(rt2x00dev, 3, value);
+ /* This chip has hardware antenna diversity*/
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
+ rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
+ rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
+ rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
}
- if (rt2x00_rt(rt2x00dev, RT3352)) {
- rt2800_bbp_write(rt2x00dev, 163, 0xbd);
- /* Set ITxBF timeout to 0x9c40=1000msec */
- rt2800_bbp_write(rt2x00dev, 179, 0x02);
- rt2800_bbp_write(rt2x00dev, 180, 0x00);
- rt2800_bbp_write(rt2x00dev, 182, 0x40);
- rt2800_bbp_write(rt2x00dev, 180, 0x01);
- rt2800_bbp_write(rt2x00dev, 182, 0x9c);
- rt2800_bbp_write(rt2x00dev, 179, 0x00);
- /* Reprogram the inband interface to put right values in RXWI */
- rt2800_bbp_write(rt2x00dev, 142, 0x04);
- rt2800_bbp_write(rt2x00dev, 143, 0x3b);
- rt2800_bbp_write(rt2x00dev, 142, 0x06);
- rt2800_bbp_write(rt2x00dev, 143, 0xa0);
- rt2800_bbp_write(rt2x00dev, 142, 0x07);
- rt2800_bbp_write(rt2x00dev, 143, 0xa1);
- rt2800_bbp_write(rt2x00dev, 142, 0x08);
- rt2800_bbp_write(rt2x00dev, 143, 0xa2);
-
- rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+ rt2800_bbp_read(rt2x00dev, 152, &value);
+ if (ant == 0)
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
+ else
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
+ rt2800_bbp_write(rt2x00dev, 152, value);
+
+ rt2800_init_freq_calibration(rt2x00dev);
+}
+
+static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
+{
+ int ant, div_mode;
+ u16 eeprom;
+ u8 value;
+
+ rt2800_init_bbp_early(rt2x00dev);
+
+ rt2800_bbp_read(rt2x00dev, 105, &value);
+ rt2x00_set_field8(&value, BBP105_MLD,
+ rt2x00dev->default_ant.rx_chain_num == 2);
+ rt2800_bbp_write(rt2x00dev, 105, value);
+
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 20, 0x06);
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
+ rt2800_bbp_write(rt2x00dev, 65, 0x2C);
+ rt2800_bbp_write(rt2x00dev, 68, 0xDD);
+ rt2800_bbp_write(rt2x00dev, 69, 0x1A);
+ rt2800_bbp_write(rt2x00dev, 70, 0x05);
+ rt2800_bbp_write(rt2x00dev, 73, 0x13);
+ rt2800_bbp_write(rt2x00dev, 74, 0x0F);
+ rt2800_bbp_write(rt2x00dev, 75, 0x4F);
+ rt2800_bbp_write(rt2x00dev, 76, 0x28);
+ rt2800_bbp_write(rt2x00dev, 77, 0x59);
+ rt2800_bbp_write(rt2x00dev, 84, 0x9A);
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ rt2800_bbp_write(rt2x00dev, 88, 0x90);
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+ rt2800_bbp_write(rt2x00dev, 95, 0x9a);
+ rt2800_bbp_write(rt2x00dev, 98, 0x12);
+ rt2800_bbp_write(rt2x00dev, 103, 0xC0);
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+ /* FIXME BBP105 overwrite */
+ rt2800_bbp_write(rt2x00dev, 105, 0x3C);
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+ rt2800_bbp_write(rt2x00dev, 128, 0x12);
+ rt2800_bbp_write(rt2x00dev, 134, 0xD0);
+ rt2800_bbp_write(rt2x00dev, 135, 0xF6);
+ rt2800_bbp_write(rt2x00dev, 137, 0x0F);
+
+ /* Initialize GLRT (Generalized Likehood Radio Test) */
+ /* Initialize GLRT (Generalized Likelihood Radio Test) */
+
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
+ ant = (div_mode == 3) ? 1 : 0;
+ rt2800_bbp_read(rt2x00dev, 152, &value);
+ if (ant == 0) {
+ /* Main antenna */
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
+ } else {
+ /* Auxiliary antenna */
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
}
+ rt2800_bbp_write(rt2x00dev, 152, value);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
- int ant, div_mode;
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
+ rt2800_bbp_read(rt2x00dev, 254, &value);
+ rt2x00_set_field8(&value, BBP254_BIT7, 1);
+ rt2800_bbp_write(rt2x00dev, 254, value);
+ }
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
- div_mode = rt2x00_get_field16(eeprom,
- EEPROM_NIC_CONF1_ANT_DIVERSITY);
- ant = (div_mode == 3) ? 1 : 0;
+ rt2800_init_freq_calibration(rt2x00dev);
- /* check if this is a Bluetooth combo card */
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
- u32 reg;
-
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
- rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
- rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
- rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
- rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
- if (ant == 0)
- rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
- else if (ant == 1)
- rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
- rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
- }
+ rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+}
- /* This chip has hardware antenna diversity*/
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
- rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
- rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
- rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
- }
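+/* Dispatch to the chip specific BBP register initialization; all chips
+ * except RT5592 then apply the BBP overrides stored in the EEPROM.
+ */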
+static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u16 eeprom;
+ u8 reg_id;
+ u8 value;
- rt2800_bbp_read(rt2x00dev, 152, &value);
- if (ant == 0)
- rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
- else
- rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
- rt2800_bbp_write(rt2x00dev, 152, value);
+ if (rt2800_is_305x_soc(rt2x00dev))
+ rt2800_init_bbp_305x_soc(rt2x00dev);
- rt2800_init_freq_calibration(rt2x00dev);
+ switch (rt2x00dev->chip.rt) {
+ case RT2860:
+ case RT2872:
+ case RT2883:
+ rt2800_init_bbp_28xx(rt2x00dev);
+ break;
+ case RT3070:
+ case RT3071:
+ case RT3090:
+ rt2800_init_bbp_30xx(rt2x00dev);
+ break;
+ case RT3290:
+ rt2800_init_bbp_3290(rt2x00dev);
+ break;
+ case RT3352:
+ rt2800_init_bbp_3352(rt2x00dev);
+ break;
+ case RT3390:
+ rt2800_init_bbp_3390(rt2x00dev);
+ break;
+ case RT3572:
+ rt2800_init_bbp_3572(rt2x00dev);
+ break;
+ case RT5390:
+ case RT5392:
+ rt2800_init_bbp_53xx(rt2x00dev);
+ break;
+ case RT5592:
+ rt2800_init_bbp_5592(rt2x00dev);
+ return;
}
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
@@ -4344,8 +4565,6 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, reg_id, value);
}
}
-
- return 0;
}
static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev)
@@ -5196,9 +5415,11 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
}
msleep(1);
- if (unlikely(rt2800_init_bbp(rt2x00dev)))
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
+ rt2800_wait_bbp_ready(rt2x00dev)))
return -EIO;
+ rt2800_init_bbp(rt2x00dev);
rt2800_init_rfcsr(rt2x00dev);
if (rt2x00_is_usb(rt2x00dev) &&
@@ -6056,8 +6277,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
for (i = 14; i < spec->num_channels; i++) {
- info[i].default_power1 = default_power1[i];
- info[i].default_power2 = default_power2[i];
+ info[i].default_power1 = default_power1[i - 14];
+ info[i].default_power2 = default_power2[i - 14];
}
}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 6f4a861af336..00055627eb8d 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -637,6 +637,7 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
__le32 *txd = entry_priv->desc;
u32 word;
+ const unsigned int txwi_size = entry->queue->winfo_size;
/*
* The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
@@ -659,14 +660,14 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
!test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W1_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W1_SD_LEN0, TXWI_DESC_SIZE);
+ rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
rt2x00_desc_write(txd, 1, word);
word = 0;
rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
- skbdesc->skb_dma + TXWI_DESC_SIZE);
+ skbdesc->skb_dma + txwi_size);
rt2x00_desc_write(txd, 2, word);
word = 0;
@@ -1014,7 +1015,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
* Since we have only one producer and one consumer we don't
* need to lock the kfifo.
*/
- for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
+ for (i = 0; i < rt2x00dev->tx->limit; i++) {
rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
@@ -1186,29 +1187,43 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.sta_remove = rt2800_sta_remove,
};
-static const struct data_queue_desc rt2800pci_queue_rx = {
- .entry_num = 128,
- .data_size = AGGREGATION_SIZE,
- .desc_size = RXD_DESC_SIZE,
- .winfo_size = RXWI_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+static void rt2800pci_queue_init(struct data_queue *queue)
+{
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 128;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = RXD_DESC_SIZE;
+ queue->winfo_size = RXWI_DESC_SIZE_4WORDS;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt2800pci_queue_tx = {
- .entry_num = 64,
- .data_size = AGGREGATION_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .winfo_size = TXWI_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 64;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt2800pci_queue_bcn = {
- .entry_num = 8,
- .data_size = 0, /* No DMA required for beacons */
- .desc_size = TXD_DESC_SIZE,
- .winfo_size = TXWI_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_BEACON:
+ queue->limit = 8;
+ queue->data_size = 0; /* No DMA required for beacons */
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_ATIM:
+ /* fallthrough */
+ default:
+ BUG();
+ break;
+ }
+}
static const struct rt2x00_ops rt2800pci_ops = {
.name = KBUILD_MODNAME,
@@ -1217,10 +1232,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .extra_tx_headroom = TXWI_DESC_SIZE,
- .rx = &rt2800pci_queue_rx,
- .tx = &rt2800pci_queue_tx,
- .bcn = &rt2800pci_queue_bcn,
+ .queue_init = rt2800pci_queue_init,
.lib = &rt2800pci_rt2x00_ops,
.drv = &rt2800pci_rt2800_ops,
.hw = &rt2800pci_mac80211_ops,
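
This conversion (repeated below for rt2800usb, rt61pci and rt73usb) replaces the per-driver static struct data_queue_desc tables with a queue_init() callback that fills in each live data_queue, so the parameters can depend on runtime information such as the detected chipset. A minimal, self-contained illustration of the pattern (plain C with made-up names and sizes, not kernel code):

#include <stdio.h>

enum qid { QID_RX, QID_TX, QID_BEACON };

struct data_queue {
	enum qid qid;
	unsigned int limit;
	unsigned int winfo_size;
};

/* Driver-supplied callback: fills per-queue parameters at allocation
 * time; "new_chip" stands in for runtime chip detection (e.g. RT5592). */
static void example_queue_init(struct data_queue *queue, int new_chip)
{
	switch (queue->qid) {
	case QID_RX:
		queue->limit = 128;
		queue->winfo_size = new_chip ? 24 : 16;
		break;
	case QID_TX:
		queue->limit = 64;
		queue->winfo_size = new_chip ? 20 : 16;
		break;
	case QID_BEACON:
		queue->limit = 8;
		queue->winfo_size = new_chip ? 20 : 16;
		break;
	}
}

int main(void)
{
	struct data_queue rx = { .qid = QID_RX };

	example_queue_init(&rx, 1);
	printf("rx: limit=%u winfo_size=%u\n", rx.limit, rx.winfo_size);
	return 0;
}

The rt2x00queue.c hunks further down show the library side: ops->queue_init() is called from rt2x00queue_init() and the queue threshold is derived from the limit afterwards.
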
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ac854d75bd6c..840833b26bfa 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -327,7 +327,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
* this limit so reduce the number to prevent errors.
*/
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT,
- ((rt2x00dev->ops->rx->entry_num * DATA_FRAME_SIZE)
+ ((rt2x00dev->rx->limit * DATA_FRAME_SIZE)
/ 1024) - 3);
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
@@ -849,85 +849,63 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.sta_remove = rt2800_sta_remove,
};
-static const struct data_queue_desc rt2800usb_queue_rx = {
- .entry_num = 128,
- .data_size = AGGREGATION_SIZE,
- .desc_size = RXINFO_DESC_SIZE,
- .winfo_size = RXWI_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
-
-static const struct data_queue_desc rt2800usb_queue_tx = {
- .entry_num = 16,
- .data_size = AGGREGATION_SIZE,
- .desc_size = TXINFO_DESC_SIZE,
- .winfo_size = TXWI_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
-
-static const struct data_queue_desc rt2800usb_queue_bcn = {
- .entry_num = 8,
- .data_size = MGMT_FRAME_SIZE,
- .desc_size = TXINFO_DESC_SIZE,
- .winfo_size = TXWI_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+static void rt2800usb_queue_init(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ unsigned short txwi_size, rxwi_size;
-static const struct rt2x00_ops rt2800usb_ops = {
- .name = KBUILD_MODNAME,
- .drv_data_size = sizeof(struct rt2800_drv_data),
- .max_ap_intf = 8,
- .eeprom_size = EEPROM_SIZE,
- .rf_size = RF_SIZE,
- .tx_queues = NUM_TX_QUEUES,
- .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
- .rx = &rt2800usb_queue_rx,
- .tx = &rt2800usb_queue_tx,
- .bcn = &rt2800usb_queue_bcn,
- .lib = &rt2800usb_rt2x00_ops,
- .drv = &rt2800usb_rt2800_ops,
- .hw = &rt2800usb_mac80211_ops,
-#ifdef CONFIG_RT2X00_LIB_DEBUGFS
- .debugfs = &rt2800_rt2x00debug,
-#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
-};
+ if (rt2x00_rt(rt2x00dev, RT5592)) {
+ txwi_size = TXWI_DESC_SIZE_5WORDS;
+ rxwi_size = RXWI_DESC_SIZE_6WORDS;
+ } else {
+ txwi_size = TXWI_DESC_SIZE_4WORDS;
+ rxwi_size = RXWI_DESC_SIZE_4WORDS;
+ }
-static const struct data_queue_desc rt2800usb_queue_rx_5592 = {
- .entry_num = 128,
- .data_size = AGGREGATION_SIZE,
- .desc_size = RXINFO_DESC_SIZE,
- .winfo_size = RXWI_DESC_SIZE_5592,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 128;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = RXINFO_DESC_SIZE;
+ queue->winfo_size = rxwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb);
+ break;
-static const struct data_queue_desc rt2800usb_queue_tx_5592 = {
- .entry_num = 16,
- .data_size = AGGREGATION_SIZE,
- .desc_size = TXINFO_DESC_SIZE,
- .winfo_size = TXWI_DESC_SIZE_5592,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 16;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = TXINFO_DESC_SIZE;
+ queue->winfo_size = txwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb);
+ break;
-static const struct data_queue_desc rt2800usb_queue_bcn_5592 = {
- .entry_num = 8,
- .data_size = MGMT_FRAME_SIZE,
- .desc_size = TXINFO_DESC_SIZE,
- .winfo_size = TXWI_DESC_SIZE_5592,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+ case QID_BEACON:
+ queue->limit = 8;
+ queue->data_size = MGMT_FRAME_SIZE;
+ queue->desc_size = TXINFO_DESC_SIZE;
+ queue->winfo_size = txwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb);
+ break;
+ case QID_ATIM:
+ /* fallthrough */
+ default:
+ BUG();
+ break;
+ }
+}
-static const struct rt2x00_ops rt2800usb_ops_5592 = {
+static const struct rt2x00_ops rt2800usb_ops = {
.name = KBUILD_MODNAME,
.drv_data_size = sizeof(struct rt2800_drv_data),
.max_ap_intf = 8,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592,
- .rx = &rt2800usb_queue_rx_5592,
- .tx = &rt2800usb_queue_tx_5592,
- .bcn = &rt2800usb_queue_bcn_5592,
+ .queue_init = rt2800usb_queue_init,
.lib = &rt2800usb_rt2x00_ops,
.drv = &rt2800usb_rt2800_ops,
.hw = &rt2800usb_mac80211_ops,
@@ -1248,15 +1226,15 @@ static struct usb_device_id rt2800usb_device_table[] = {
#endif
#ifdef CONFIG_RT2800USB_RT55XX
/* Arcadyan */
- { USB_DEVICE(0x043e, 0x7a32), .driver_info = 5592 },
+ { USB_DEVICE(0x043e, 0x7a32) },
/* AVM GmbH */
- { USB_DEVICE(0x057c, 0x8501), .driver_info = 5592 },
+ { USB_DEVICE(0x057c, 0x8501) },
/* D-Link DWA-160-B2 */
- { USB_DEVICE(0x2001, 0x3c1a), .driver_info = 5592 },
+ { USB_DEVICE(0x2001, 0x3c1a) },
/* Proware */
- { USB_DEVICE(0x043e, 0x7a13), .driver_info = 5592 },
+ { USB_DEVICE(0x043e, 0x7a13) },
/* Ralink */
- { USB_DEVICE(0x148f, 0x5572), .driver_info = 5592 },
+ { USB_DEVICE(0x148f, 0x5572) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
@@ -1361,9 +1339,6 @@ MODULE_LICENSE("GPL");
static int rt2800usb_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
- if (id->driver_info == 5592)
- return rt2x00usb_probe(usb_intf, &rt2800usb_ops_5592);
-
return rt2x00usb_probe(usb_intf, &rt2800usb_ops);
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 7510723a8c37..ee3fc570b11d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -648,11 +648,7 @@ struct rt2x00_ops {
const unsigned int eeprom_size;
const unsigned int rf_size;
const unsigned int tx_queues;
- const unsigned int extra_tx_headroom;
- const struct data_queue_desc *rx;
- const struct data_queue_desc *tx;
- const struct data_queue_desc *bcn;
- const struct data_queue_desc *atim;
+ void (*queue_init)(struct data_queue *queue);
const struct rt2x00lib_ops *lib;
const void *drv;
const struct ieee80211_ops *hw;
@@ -1010,6 +1006,9 @@ struct rt2x00_dev {
*/
struct list_head bar_list;
spinlock_t bar_list_lock;
+
+ /* Extra TX headroom required for alignment purposes. */
+ unsigned int extra_tx_headroom;
};
struct rt2x00_bar_list_entry {
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index c8b9ef0c21f8..b16521e6bf4a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -334,7 +334,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
/*
* Remove the extra tx headroom from the skb.
*/
- skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);
+ skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);
/*
* Signal that the TX descriptor is no longer in the skb.
@@ -1049,7 +1049,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
rt2x00dev->hw->extra_tx_headroom =
max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
- rt2x00dev->ops->extra_tx_headroom);
+ rt2x00dev->extra_tx_headroom);
/*
* Take TX headroom required for alignment into account.
@@ -1077,7 +1077,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
int kfifo_size =
roundup_pow_of_two(rt2x00dev->ops->tx_queues *
- rt2x00dev->ops->tx->entry_num *
+ rt2x00dev->tx->limit *
sizeof(u32));
status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size,
@@ -1256,6 +1256,17 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->wiphy->n_iface_combinations = 1;
}
+static unsigned int rt2x00dev_extra_tx_headroom(struct rt2x00_dev *rt2x00dev)
+{
+ if (WARN_ON(!rt2x00dev->tx))
+ return 0;
+
+ if (rt2x00_is_usb(rt2x00dev))
+ return rt2x00dev->tx[0].winfo_size + rt2x00dev->tx[0].desc_size;
+
+ return rt2x00dev->tx[0].winfo_size;
+}
+
/*
* driver allocation handlers.
*/
@@ -1301,23 +1312,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
(rt2x00dev->ops->max_ap_intf - 1);
/*
- * Determine which operating modes are supported, all modes
- * which require beaconing, depend on the availability of
- * beacon entries.
- */
- rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- if (rt2x00dev->ops->bcn->entry_num > 0)
- rt2x00dev->hw->wiphy->interface_modes |=
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_WDS);
-
- rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
-
- /*
* Initialize work.
*/
rt2x00dev->workqueue =
@@ -1347,6 +1341,26 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
if (retval)
goto exit;
+ /* Cache TX headroom value */
+ rt2x00dev->extra_tx_headroom = rt2x00dev_extra_tx_headroom(rt2x00dev);
+
+ /*
+ * Determine which operating modes are supported, all modes
+ * which require beaconing, depend on the availability of
+ * beacon entries.
+ */
+ rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ if (rt2x00dev->bcn->limit > 0)
+ rt2x00dev->hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_WDS);
+
+ rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
/*
* Initialize ieee80211 structure.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index dc49e525ae5e..76d95deb274b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -105,11 +105,13 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
goto exit_release_regions;
}
+ pci_enable_msi(pci_dev);
+
hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
if (!hw) {
rt2x00_probe_err("Failed to allocate hardware\n");
retval = -ENOMEM;
- goto exit_release_regions;
+ goto exit_disable_msi;
}
pci_set_drvdata(pci_dev, hw);
@@ -150,6 +152,9 @@ exit_free_reg:
exit_free_device:
ieee80211_free_hw(hw);
+exit_disable_msi:
+ pci_disable_msi(pci_dev);
+
exit_release_regions:
pci_release_regions(pci_dev);
@@ -174,6 +179,8 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
rt2x00pci_free_reg(rt2x00dev);
ieee80211_free_hw(hw);
+ pci_disable_msi(pci_dev);
+
/*
* Free the PCI device data.
*/
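
The rt2x00pci.c change above enables MSI before the mac80211 hardware structure is allocated and pairs it with pci_disable_msi() on the later error path and in rt2x00pci_remove(). A small self-contained sketch of the goto-unwind pattern being extended (plain C, stand-in resources, not kernel code):

#include <stdio.h>

static int  request_regions(void) { puts("regions claimed"); return 0; }
static void release_regions(void) { puts("regions released"); }
static void enable_msi(void)      { puts("msi on (best effort)"); }
static void disable_msi(void)     { puts("msi off"); }
static void *alloc_hw(void)       { return NULL; }	/* simulate failure */

/* Resources are torn down in reverse order of acquisition; each error
 * path jumps to the label matching the last step that succeeded. */
static int probe(void)
{
	void *hw;

	if (request_regions())
		return -1;

	/* Return value ignored in the patch; legacy IRQs remain the fallback. */
	enable_msi();

	hw = alloc_hw();
	if (!hw)
		goto exit_disable_msi;

	puts("probe ok");
	return 0;

exit_disable_msi:
	disable_msi();
	release_regions();
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}
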
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 2c12311467a9..aa95c6cf3545 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -542,8 +542,8 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
/*
* Add the requested extra tx headroom in front of the skb.
*/
- skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
- memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
+ skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
+ memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);
/*
* Call the driver's write_tx_data function, if it exists.
@@ -596,7 +596,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct ieee80211_bar *bar = (void *) (entry->skb->data +
- rt2x00dev->ops->extra_tx_headroom);
+ rt2x00dev->extra_tx_headroom);
struct rt2x00_bar_list_entry *bar_entry;
if (likely(!ieee80211_is_back_req(bar->frame_control)))
@@ -936,13 +936,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
-void rt2x00queue_pause_queue(struct data_queue *queue)
+void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
- if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
- !test_bit(QUEUE_STARTED, &queue->flags) ||
- test_and_set_bit(QUEUE_PAUSED, &queue->flags))
- return;
-
switch (queue->qid) {
case QID_AC_VO:
case QID_AC_VI:
@@ -958,6 +953,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
break;
}
}
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ rt2x00queue_pause_queue_nocheck(queue);
+}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
@@ -1019,7 +1023,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
return;
}
- rt2x00queue_pause_queue(queue);
+ rt2x00queue_pause_queue_nocheck(queue);
queue->rt2x00dev->ops->lib->stop_queue(queue);
@@ -1161,8 +1165,7 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
}
}
-static int rt2x00queue_alloc_entries(struct data_queue *queue,
- const struct data_queue_desc *qdesc)
+static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
struct queue_entry *entries;
unsigned int entry_size;
@@ -1170,16 +1173,10 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
rt2x00queue_reset(queue);
- queue->limit = qdesc->entry_num;
- queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
- queue->data_size = qdesc->data_size;
- queue->desc_size = qdesc->desc_size;
- queue->winfo_size = qdesc->winfo_size;
-
/*
* Allocate all queue entries.
*/
- entry_size = sizeof(*entries) + qdesc->priv_size;
+ entry_size = sizeof(*entries) + queue->priv_size;
entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -1195,7 +1192,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
entries[i].entry_idx = i;
entries[i].priv_data =
QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
- sizeof(*entries), qdesc->priv_size);
+ sizeof(*entries), queue->priv_size);
}
#undef QUEUE_ENTRY_PRIV_OFFSET
@@ -1237,23 +1234,22 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
struct data_queue *queue;
int status;
- status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
+ status = rt2x00queue_alloc_entries(rt2x00dev->rx);
if (status)
goto exit;
tx_queue_for_each(rt2x00dev, queue) {
- status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
+ status = rt2x00queue_alloc_entries(queue);
if (status)
goto exit;
}
- status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
+ status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
if (status)
goto exit;
if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
- status = rt2x00queue_alloc_entries(rt2x00dev->atim,
- rt2x00dev->ops->atim);
+ status = rt2x00queue_alloc_entries(rt2x00dev->atim);
if (status)
goto exit;
}
@@ -1297,6 +1293,10 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
queue->aifs = 2;
queue->cw_min = 5;
queue->cw_max = 10;
+
+ rt2x00dev->ops->queue_init(queue);
+
+ queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 4a7b34e9261b..ebe117224979 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -453,6 +453,7 @@ enum data_queue_flags {
* @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
* @data_size: Maximum data size for the frames in this queue.
* @desc_size: Hardware descriptor size for the data in this queue.
+ * @priv_size: Size of per-queue_entry private data.
* @usb_endpoint: Device endpoint used for communication (USB only)
* @usb_maxpacket: Max packet size for given endpoint (USB only)
*/
@@ -481,31 +482,13 @@ struct data_queue {
unsigned short data_size;
unsigned char desc_size;
unsigned char winfo_size;
+ unsigned short priv_size;
unsigned short usb_endpoint;
unsigned short usb_maxpacket;
};
/**
- * struct data_queue_desc: Data queue description
- *
- * The information in this structure is used by drivers
- * to inform rt2x00lib about the creation of the data queue.
- *
- * @entry_num: Maximum number of entries for a queue.
- * @data_size: Maximum data size for the frames in this queue.
- * @desc_size: Hardware descriptor size for the data in this queue.
- * @priv_size: Size of per-queue_entry private data.
- */
-struct data_queue_desc {
- unsigned short entry_num;
- unsigned short data_size;
- unsigned char desc_size;
- unsigned char winfo_size;
- unsigned short priv_size;
-};
-
-/**
* queue_end - Return pointer to the last queue (HELPER MACRO).
* @__dev: Pointer to &struct rt2x00_dev
*
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 0dc8180e251b..54d3ddfc9888 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2175,7 +2175,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
* that the TX_STA_FIFO stack has a size of 16. We stick to our
* tx ring size for now.
*/
- for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
+ for (i = 0; i < rt2x00dev->tx->limit; i++) {
rt2x00mmio_register_read(rt2x00dev, STA_CSR4, &reg);
if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
break;
@@ -2825,7 +2825,8 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
for (i = 14; i < spec->num_channels; i++) {
info[i].max_power = MAX_TXPOWER;
- info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ info[i].default_power1 =
+ TXPOWER_FROM_DEV(tx_power[i - 14]);
}
}
@@ -3025,26 +3026,40 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
.config = rt61pci_config,
};
-static const struct data_queue_desc rt61pci_queue_rx = {
- .entry_num = 32,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = RXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+static void rt61pci_queue_init(struct data_queue *queue)
+{
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 32;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = RXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt61pci_queue_tx = {
- .entry_num = 32,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 32;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
-static const struct data_queue_desc rt61pci_queue_bcn = {
- .entry_num = 4,
- .data_size = 0, /* No DMA required for beacons */
- .desc_size = TXINFO_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_mmio),
-};
+ case QID_BEACON:
+ queue->limit = 4;
+ queue->data_size = 0; /* No DMA required for beacons */
+ queue->desc_size = TXINFO_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_ATIM:
+ /* fallthrough */
+ default:
+ BUG();
+ break;
+ }
+}
static const struct rt2x00_ops rt61pci_ops = {
.name = KBUILD_MODNAME,
@@ -3052,10 +3067,7 @@ static const struct rt2x00_ops rt61pci_ops = {
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .extra_tx_headroom = 0,
- .rx = &rt61pci_queue_rx,
- .tx = &rt61pci_queue_tx,
- .bcn = &rt61pci_queue_bcn,
+ .queue_init = rt61pci_queue_init,
.lib = &rt61pci_rt2x00_ops,
.hw = &rt61pci_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 377e09bb0b81..1d3880e09a13 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2167,7 +2167,8 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
for (i = 14; i < spec->num_channels; i++) {
info[i].max_power = MAX_TXPOWER;
- info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ info[i].default_power1 =
+ TXPOWER_FROM_DEV(tx_power[i - 14]);
}
}
@@ -2359,26 +2360,40 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.config = rt73usb_config,
};
-static const struct data_queue_desc rt73usb_queue_rx = {
- .entry_num = 32,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = RXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+static void rt73usb_queue_init(struct data_queue *queue)
+{
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 32;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = RXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb);
+ break;
-static const struct data_queue_desc rt73usb_queue_tx = {
- .entry_num = 32,
- .data_size = DATA_FRAME_SIZE,
- .desc_size = TXD_DESC_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 32;
+ queue->data_size = DATA_FRAME_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb);
+ break;
-static const struct data_queue_desc rt73usb_queue_bcn = {
- .entry_num = 4,
- .data_size = MGMT_FRAME_SIZE,
- .desc_size = TXINFO_SIZE,
- .priv_size = sizeof(struct queue_entry_priv_usb),
-};
+ case QID_BEACON:
+ queue->limit = 4;
+ queue->data_size = MGMT_FRAME_SIZE;
+ queue->desc_size = TXINFO_SIZE;
+ queue->priv_size = sizeof(struct queue_entry_priv_usb);
+ break;
+
+ case QID_ATIM:
+ /* fallthrough */
+ default:
+ BUG();
+ break;
+ }
+}
static const struct rt2x00_ops rt73usb_ops = {
.name = KBUILD_MODNAME,
@@ -2386,10 +2401,7 @@ static const struct rt2x00_ops rt73usb_ops = {
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .extra_tx_headroom = TXD_DESC_SIZE,
- .rx = &rt73usb_queue_rx,
- .tx = &rt73usb_queue_tx,
- .bcn = &rt73usb_queue_bcn,
+ .queue_init = rt73usb_queue_init,
.lib = &rt73usb_rt2x00_ops,
.hw = &rt73usb_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 7253de3d8c66..c2ffce7a907c 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,27 +1,20 @@
-config RTLWIFI
- tristate "Realtek wireless card support"
- depends on MAC80211
- select FW_LOADER
- ---help---
- This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE
- drivers. This module does nothing by itself - the various front-end
- drivers need to be enabled to support any desired devices.
-
- If you choose to build as a module, it'll be called rtlwifi.
-
-config RTLWIFI_DEBUG
- bool "Debugging output for rtlwifi driver family"
- depends on RTLWIFI
+menuconfig RTL_CARDS
+ tristate "Realtek rtlwifi family of devices"
+ depends on MAC80211 && (PCI || USB)
default y
---help---
- To use the module option that sets the dynamic-debugging level for,
- the front-end driver, this parameter must be "Y". For memory-limited
- systems, choose "N". If in doubt, choose "Y".
+ This option will enable support for the Realtek mac80211-based
+ wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de,
+ rtl8723eu, and rtl8188eu share some common code.
+
+if RTL_CARDS
config RTL8192CE
tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
select RTL8192C_COMMON
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
wireless network adapters.
@@ -30,7 +23,9 @@ config RTL8192CE
config RTL8192SE
tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe
wireless network adapters.
@@ -39,7 +34,9 @@ config RTL8192SE
config RTL8192DE
tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe
wireless network adapters.
@@ -48,7 +45,9 @@ config RTL8192DE
config RTL8723AE
tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8723AE 802.11n PCIe
wireless network adapters.
@@ -57,7 +56,9 @@ config RTL8723AE
config RTL8188EE
tristate "Realtek RTL8188EE Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8188EE 802.11n PCIe
wireless network adapters.
@@ -66,7 +67,9 @@ config RTL8188EE
config RTL8192CU
tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
- depends on RTLWIFI && USB
+ depends on USB
+ select RTLWIFI
+ select RTLWIFI_USB
select RTL8192C_COMMON
---help---
This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
@@ -74,7 +77,28 @@ config RTL8192CU
If you choose to build it as a module, it will be called rtl8192cu
+config RTLWIFI
+ tristate
+ select FW_LOADER
+
+config RTLWIFI_PCI
+ tristate
+
+config RTLWIFI_USB
+ tristate
+
+config RTLWIFI_DEBUG
+ bool "Debugging output for rtlwifi driver family"
+ depends on RTLWIFI
+ default y
+ ---help---
+ To use the module option that sets the dynamic-debugging level for,
+ the front-end driver, this parameter must be "Y". For memory-limited
+ systems, choose "N". If in doubt, choose "Y".
+
config RTL8192C_COMMON
tristate
depends on RTL8192CE || RTL8192CU
- default m
+ default y
+
+endif
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index ff02b874f8d8..d56f023a4b90 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -12,13 +12,11 @@ rtlwifi-objs := \
rtl8192c_common-objs += \
-ifneq ($(CONFIG_PCI),)
-rtlwifi-objs += pci.o
-endif
+obj-$(CONFIG_RTLWIFI_PCI) += rtl_pci.o
+rtl_pci-objs := pci.o
-ifneq ($(CONFIG_USB),)
-rtlwifi-objs += usb.o
-endif
+obj-$(CONFIG_RTLWIFI_USB) += rtl_usb.o
+rtl_usb-objs := usb.o
obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
obj-$(CONFIG_RTL8192CE) += rtl8192ce/
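
With this Makefile change the PCI and USB transport code is built as separate modules (rtl_pci.ko and rtl_usb.ko) instead of being linked into rtlwifi.ko, which is why the base.c, core.c, debug.c, efuse.c and ps.c hunks that follow add EXPORT_SYMBOL_GPL() to the helpers those modules call, and why pci.c and usb.c gain their own MODULE_* tags. A hypothetical illustration of the requirement (not code from the patch):

/* A helper shared between rtlwifi.ko and the transport modules must be
 * exported, otherwise rtl_pci.ko / rtl_usb.ko fail to load with an
 * "Unknown symbol" error. rtl_example_helper is a made-up name. */
int rtl_example_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL_GPL(rtl_example_helper);
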
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index a5f223145b0f..7651f5acc14b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -172,6 +172,7 @@ u8 rtl_tid_to_ac(u8 tid)
{
return tid_to_ac[tid];
}
+EXPORT_SYMBOL_GPL(rtl_tid_to_ac);
static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
struct ieee80211_sta_ht_cap *ht_cap)
@@ -406,6 +407,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
cancel_delayed_work(&rtlpriv->works.fwevt_wq);
}
+EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
void rtl_init_rfkill(struct ieee80211_hw *hw)
{
@@ -439,6 +441,7 @@ void rtl_deinit_rfkill(struct ieee80211_hw *hw)
{
wiphy_rfkill_stop_polling(hw->wiphy);
}
+EXPORT_SYMBOL_GPL(rtl_deinit_rfkill);
int rtl_init_core(struct ieee80211_hw *hw)
{
@@ -489,10 +492,12 @@ int rtl_init_core(struct ieee80211_hw *hw)
return 0;
}
+EXPORT_SYMBOL_GPL(rtl_init_core);
void rtl_deinit_core(struct ieee80211_hw *hw)
{
}
+EXPORT_SYMBOL_GPL(rtl_deinit_core);
void rtl_init_rx_config(struct ieee80211_hw *hw)
{
@@ -501,6 +506,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
}
+EXPORT_SYMBOL_GPL(rtl_init_rx_config);
/*********************************************************
*
@@ -879,6 +885,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
return true;
}
+EXPORT_SYMBOL_GPL(rtl_tx_mgmt_proc);
void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
@@ -1052,6 +1059,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
return true;
}
+EXPORT_SYMBOL_GPL(rtl_action_proc);
/*should call before software enc*/
u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
@@ -1125,6 +1133,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
return false;
}
+EXPORT_SYMBOL_GPL(rtl_is_special_data);
/*********************************************************
*
@@ -1300,6 +1309,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
rtlpriv->link_info.bcn_rx_inperiod++;
}
+EXPORT_SYMBOL_GPL(rtl_beacon_statistic);
void rtl_watchdog_wq_callback(void *data)
{
@@ -1793,6 +1803,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
mac->vendor = vendor;
}
+EXPORT_SYMBOL_GPL(rtl_recognize_peer);
/*********************************************************
*
@@ -1817,7 +1828,7 @@ static ssize_t rtl_store_debug_level(struct device *d,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 0, &val);
+ ret = kstrtoul(buf, 0, &val);
if (ret) {
printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
} else {
@@ -1849,6 +1860,7 @@ struct attribute_group rtl_attribute_group = {
.name = "rtlsysfs",
.attrs = rtl_sysfs_entries,
};
+EXPORT_SYMBOL_GPL(rtl_attribute_group);
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
@@ -1856,7 +1868,8 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
-struct rtl_global_var global_var = {};
+struct rtl_global_var rtl_global_var = {};
+EXPORT_SYMBOL_GPL(rtl_global_var);
static int __init rtl_core_module_init(void)
{
@@ -1864,8 +1877,8 @@ static int __init rtl_core_module_init(void)
pr_err("Unable to register rtl_rc, use default RC !!\n");
/* init some global vars */
- INIT_LIST_HEAD(&global_var.glb_priv_list);
- spin_lock_init(&global_var.glb_list_lock);
+ INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
+ spin_lock_init(&rtl_global_var.glb_list_lock);
return 0;
}
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 8576bc34b032..0e5fe0902daf 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -147,7 +147,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
extern struct attribute_group rtl_attribute_group;
void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
-extern struct rtl_global_var global_var;
+extern struct rtl_global_var rtl_global_var;
int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool isht, u8 desc_rate, bool first_ampdu);
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index ee84844be008..733b7ce7f0e2 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1330,3 +1330,4 @@ const struct ieee80211_ops rtl_ops = {
.rfkill_poll = rtl_op_rfkill_poll,
.flush = rtl_op_flush,
};
+EXPORT_SYMBOL_GPL(rtl_ops);
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
index 7d52d3d7769f..76e2086e137e 100644
--- a/drivers/net/wireless/rtlwifi/debug.c
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -51,3 +51,4 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
/*Init Debug flag enable condition */
}
+EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init);
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 9e3894178e77..838a1ed3f194 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -229,6 +229,7 @@ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
*pbuf = (u8) (value32 & 0xff);
}
+EXPORT_SYMBOL_GPL(read_efuse_byte);
void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
{
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index c97e9d327331..703f839af6ca 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -35,6 +35,13 @@
#include "efuse.h"
#include <linux/export.h>
#include <linux/kmemleak.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCI basic driver for rtlwifi");
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
PCI_VENDOR_ID_INTEL,
@@ -1008,19 +1015,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
return;
}
-static void rtl_lps_change_work_callback(struct work_struct *work)
-{
- struct rtl_works *rtlworks =
- container_of(work, struct rtl_works, lps_change_work);
- struct ieee80211_hw *hw = rtlworks->hw;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->enter_ps)
- rtl_lps_enter(hw);
- else
- rtl_lps_leave(hw);
-}
-
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1899,7 +1893,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->rtlhal.interface = INTF_PCI;
rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
rtlpriv->intf_ops = &rtl_pci_ops;
- rtlpriv->glb_var = &global_var;
+ rtlpriv->glb_var = &rtl_global_var;
/*
*init dbgp flags before all
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 884bceae38a9..298b615964e8 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -269,6 +269,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
}
+EXPORT_SYMBOL_GPL(rtl_ips_nic_on);
/*for FW LPS*/
@@ -518,6 +519,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
"u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
}
}
+EXPORT_SYMBOL_GPL(rtl_swlps_beacon);
void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
{
@@ -611,6 +613,19 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
}
+void rtl_lps_change_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, lps_change_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->enter_ps)
+ rtl_lps_enter(hw);
+ else
+ rtl_lps_leave(hw);
+}
+EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
void rtl_swlps_wq_callback(void *data)
{
@@ -922,3 +937,4 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
else
rtl_p2p_noa_ie(hw, data, len - FCS_LEN);
}
+EXPORT_SYMBOL_GPL(rtl_p2p_info);
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 4d682b753f50..88bd76ea88f7 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -49,5 +49,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
+void rtl_lps_change_work_callback(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 953f1a0f8532..2119313a737b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
tx_agc[RF90_PATH_A] = 0x10101010;
tx_agc[RF90_PATH_B] = 0x10101010;
} else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
- TXHIGHPWRLEVEL_LEVEL1) {
+ TXHIGHPWRLEVEL_LEVEL2) {
tx_agc[RF90_PATH_A] = 0x00000000;
tx_agc[RF90_PATH_B] = 0x00000000;
} else{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 826f085c29dd..2bd598526217 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -359,6 +359,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
{RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
{}
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 19a765532603..47875ba09ff8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -842,7 +842,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
long val_y, ele_c = 0;
u8 ofdm_index[2];
s8 cck_index = 0;
- u8 ofdm_index_old[2];
+ u8 ofdm_index_old[2] = {0, 0};
s8 cck_index_old = 0;
u8 index;
int i;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index e4c4cdc3eb67..d9ee2efffe5f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -251,7 +251,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723ae_pci",
- .fw_name = "rtlwifi/rtl8723aefw.bin",
+ .fw_name = "rtlwifi/rtl8723fw.bin",
.ops = &rtl8723ae_hal_ops,
.mod_params = &rtl8723ae_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
@@ -353,8 +353,8 @@ MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless");
-MODULE_FIRMWARE("rtlwifi/rtl8723aefw.bin");
-MODULE_FIRMWARE("rtlwifi/rtl8723aefw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723fw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723fw_B.bin");
module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444);
module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444);
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a3532e077871..e56778cac9bf 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -32,6 +32,13 @@
#include "ps.h"
#include "rtl8192c/fw_common.h"
#include <linux/export.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define REALTEK_USB_VENQT_READ 0xC0
#define REALTEK_USB_VENQT_WRITE 0x40
@@ -1070,6 +1077,8 @@ int rtl_usb_probe(struct usb_interface *intf,
spin_lock_init(&rtlpriv->locks.usb_lock);
INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
rtl_fill_h2c_cmd_work_callback);
+ INIT_WORK(&rtlpriv->works.lps_change_work,
+ rtl_lps_change_work_callback);
rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 4c67c2f9ea71..c7dc6feab2ff 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -93,8 +93,7 @@ static void wl1251_spi_wake(struct wl1251 *wl)
memset(&t, 0, sizeof(t));
spi_message_init(&m);
- /*
- * Set WSPI_INIT_COMMAND
+ /* Set WSPI_INIT_COMMAND
* the data is being send from the MSB to LSB
*/
cmd[2] = 0xff;
@@ -262,7 +261,8 @@ static int wl1251_spi_probe(struct spi_device *spi)
wl->if_ops = &wl1251_spi_ops;
/* This is the only SPI value that we need to set here, the rest
- * comes from the board-peripherals file */
+ * comes from the board-peripherals file
+ */
spi->bits_per_word = 32;
ret = spi_setup(spi);
@@ -329,29 +329,7 @@ static struct spi_driver wl1251_spi_driver = {
.remove = wl1251_spi_remove,
};
-static int __init wl1251_spi_init(void)
-{
- int ret;
-
- ret = spi_register_driver(&wl1251_spi_driver);
- if (ret < 0) {
- wl1251_error("failed to register spi driver: %d", ret);
- goto out;
- }
-
-out:
- return ret;
-}
-
-static void __exit wl1251_spi_exit(void)
-{
- spi_unregister_driver(&wl1251_spi_driver);
-
- wl1251_notice("unloaded");
-}
-
-module_init(wl1251_spi_init);
-module_exit(wl1251_spi_exit);
+module_spi_driver(wl1251_spi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 9fa692d11025..7aa0eb848c5a 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/firmware.h>
+#include <linux/etherdevice.h>
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
@@ -594,8 +595,8 @@ static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
.mem3 = { .start = 0x00000000, .size = 0x00000000 },
},
[PART_PHY_INIT] = {
- .mem = { .start = 0x80926000,
- .size = sizeof(struct wl18xx_mac_and_phy_params) },
+ .mem = { .start = WL18XX_PHY_INIT_MEM_ADDR,
+ .size = WL18XX_PHY_INIT_MEM_SIZE },
.reg = { .start = 0x00000000, .size = 0x00000000 },
.mem2 = { .start = 0x00000000, .size = 0x00000000 },
.mem3 = { .start = 0x00000000, .size = 0x00000000 },
@@ -799,6 +800,9 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
u32 tmp;
int ret;
+ BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) >
+ WL18XX_PHY_INIT_MEM_SIZE);
+
ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
if (ret < 0)
goto out;
@@ -815,6 +819,35 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Workaround for FDSP code RAM corruption (needed for PG2.1
+ * and newer; for older chips it's a NOP). Change FDSP clock
+ * settings so that it's muxed to the ATPG clock instead of
+ * its own clock.
+ */
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
+ if (ret < 0)
+ goto out;
+
+ /* disable FDSP clock */
+ ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
+ MEM_FDSP_CLK_120_DISABLE);
+ if (ret < 0)
+ goto out;
+
+ /* set ATPG clock toward FDSP Code RAM rather than its own clock */
+ ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
+ MEM_FDSP_CODERAM_FUNC_CLK_SEL);
+ if (ret < 0)
+ goto out;
+
+ /* re-enable FDSP clock */
+ ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
+ MEM_FDSP_CLK_120_ENABLE);
out:
return ret;
@@ -1286,6 +1319,16 @@ static int wl18xx_get_mac(struct wl1271 *wl)
((mac1 & 0xff000000) >> 24);
wl->fuse_nic_addr = (mac1 & 0xffffff);
+ if (!wl->fuse_oui_addr && !wl->fuse_nic_addr) {
+ u8 mac[ETH_ALEN];
+
+ eth_random_addr(mac);
+
+ wl->fuse_oui_addr = (mac[0] << 16) + (mac[1] << 8) + mac[2];
+ wl->fuse_nic_addr = (mac[3] << 16) + (mac[4] << 8) + mac[5];
+ wl1271_warning("MAC address from fuse not available, using random locally administered addresses.");
+ }
+
ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
out:
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index 6306e04cd258..05dd8bad2746 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -38,6 +38,9 @@
#define WL18XX_REG_BOOT_PART_SIZE 0x00014578
#define WL18XX_PHY_INIT_MEM_ADDR 0x80926000
+#define WL18XX_PHY_END_MEM_ADDR 0x8093CA44
+#define WL18XX_PHY_INIT_MEM_SIZE \
+ (WL18XX_PHY_END_MEM_ADDR - WL18XX_PHY_INIT_MEM_ADDR)
#define WL18XX_SDIO_WSPI_BASE (WL18XX_REGISTERS_BASE)
#define WL18XX_REG_CONFIG_BASE (WL18XX_REGISTERS_BASE + 0x02000)
@@ -217,4 +220,16 @@ static const char * const rdl_names[] = {
[RDL_4_SP] = "1897 MIMO",
};
+/* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
+#define WL18XX_PHY_FPGA_SPARE_1 0x8093CA40
+
+/* command to disable FDSP clock */
+#define MEM_FDSP_CLK_120_DISABLE 0x80000000
+
+/* command to set ATPG clock toward FDSP Code RAM rather than its own clock */
+#define MEM_FDSP_CODERAM_FUNC_CLK_SEL 0xC0000000
+
+/* command to re-enable FDSP clock */
+#define MEM_FDSP_CLK_120_ENABLE 0x40000000
+
#endif /* __REG_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
index b21398f6c3ec..4f23931d7bd5 100644
--- a/drivers/net/wireless/ti/wlcore/Makefile
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -1,5 +1,5 @@
wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
- boot.o init.o debugfs.o scan.o
+ boot.o init.o debugfs.o scan.o sysfs.o
wlcore_spi-objs = spi.o
wlcore_sdio-objs = sdio.o
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 953111a502ee..b8db55c868c7 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1,10 +1,9 @@
/*
- * This file is part of wl1271
+ * This file is part of wlcore
*
* Copyright (C) 2008-2010 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ * Copyright (C) 2011-2013 Texas Instruments Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -24,34 +23,23 @@
#include <linux/module.h>
#include <linux/firmware.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/wl12xx.h>
-#include <linux/sched.h>
#include <linux/interrupt.h>
#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
-#include "event.h"
#include "tx.h"
-#include "rx.h"
#include "ps.h"
#include "init.h"
#include "debugfs.h"
-#include "cmd.h"
-#include "boot.h"
#include "testmode.h"
#include "scan.h"
#include "hw_ops.h"
-
-#define WL1271_BOOT_RETRIES 3
+#include "sysfs.h"
#define WL1271_BOOT_RETRIES 3
@@ -65,8 +53,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-static int wl12xx_set_authorized(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
@@ -983,7 +970,7 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
static int wl1271_setup(struct wl1271 *wl)
{
- wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
+ wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
sizeof(*wl->fw_status_2) +
wl->fw_status_priv_len, GFP_KERNEL);
if (!wl->fw_status_1)
@@ -993,7 +980,7 @@ static int wl1271_setup(struct wl1271 *wl)
(((u8 *) wl->fw_status_1) +
WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
- wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
+ wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
if (!wl->tx_res_if) {
kfree(wl->fw_status_1);
return -ENOMEM;
@@ -1668,8 +1655,7 @@ static int wl1271_configure_suspend(struct wl1271 *wl,
return 0;
}
-static void wl1271_configure_resume(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
@@ -2603,6 +2589,7 @@ unlock:
cancel_work_sync(&wlvif->rx_streaming_enable_work);
cancel_work_sync(&wlvif->rx_streaming_disable_work);
cancel_delayed_work_sync(&wlvif->connection_loss_work);
+ cancel_delayed_work_sync(&wlvif->channel_switch_work);
mutex_lock(&wl->mutex);
}
@@ -3210,14 +3197,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (ret < 0)
return ret;
- /* the default WEP key needs to be configured at least once */
- if (key_type == KEY_WEP) {
- ret = wl12xx_cmd_set_default_wep_key(wl,
- wlvif->default_key,
- wlvif->sta.hlid);
- if (ret < 0)
- return ret;
- }
}
return 0;
@@ -3374,6 +3353,46 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
+static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int key_idx)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
+ key_idx);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_unlock;
+
+ wlvif->default_key = key_idx;
+
+ /* the default WEP key needs to be configured at least once */
+ if (wlvif->encryption_type == KEY_WEP) {
+ ret = wl12xx_cmd_set_default_wep_key(wl,
+ key_idx,
+ wlvif->sta.hlid);
+ if (ret < 0)
+ goto out_sleep;
+ }
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out_unlock:
+ mutex_unlock(&wl->mutex);
+}
+
void wlcore_regdomain_config(struct wl1271 *wl)
{
int ret;
@@ -3782,8 +3801,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
struct ieee80211_hdr *hdr;
u32 min_rate;
int ret;
- int ieoffset = offsetof(struct ieee80211_mgmt,
- u.beacon.variable);
+ int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
u16 tmpl_id;
@@ -4230,8 +4248,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
/* Handle new association with HT. Do this after join. */
- if (sta_exists &&
- (changed & BSS_CHANGED_HT)) {
+ if (sta_exists) {
bool enabled =
bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
@@ -5368,6 +5385,7 @@ static const struct ieee80211_ops wl1271_ops = {
.ampdu_action = wl1271_op_ampdu_action,
.tx_frames_pending = wl1271_tx_frames_pending,
.set_bitrate_mask = wl12xx_set_bitrate_mask,
+ .set_default_unicast_key = wl1271_op_set_default_key_idx,
.channel_switch = wl12xx_op_channel_switch,
.flush = wlcore_op_flush,
.remain_on_channel = wlcore_op_remain_on_channel,
@@ -5403,151 +5421,6 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
return idx;
}
-static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct wl1271 *wl = dev_get_drvdata(dev);
- ssize_t len;
-
- len = PAGE_SIZE;
-
- mutex_lock(&wl->mutex);
- len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
- wl->sg_enabled);
- mutex_unlock(&wl->mutex);
-
- return len;
-
-}
-
-static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct wl1271 *wl = dev_get_drvdata(dev);
- unsigned long res;
- int ret;
-
- ret = kstrtoul(buf, 10, &res);
- if (ret < 0) {
- wl1271_warning("incorrect value written to bt_coex_mode");
- return count;
- }
-
- mutex_lock(&wl->mutex);
-
- res = !!res;
-
- if (res == wl->sg_enabled)
- goto out;
-
- wl->sg_enabled = res;
-
- if (unlikely(wl->state != WLCORE_STATE_ON))
- goto out;
-
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- wl1271_acx_sg_enable(wl, wl->sg_enabled);
- wl1271_ps_elp_sleep(wl);
-
- out:
- mutex_unlock(&wl->mutex);
- return count;
-}
-
-static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
- wl1271_sysfs_show_bt_coex_state,
- wl1271_sysfs_store_bt_coex_state);
-
-static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct wl1271 *wl = dev_get_drvdata(dev);
- ssize_t len;
-
- len = PAGE_SIZE;
-
- mutex_lock(&wl->mutex);
- if (wl->hw_pg_ver >= 0)
- len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
- else
- len = snprintf(buf, len, "n/a\n");
- mutex_unlock(&wl->mutex);
-
- return len;
-}
-
-static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
- wl1271_sysfs_show_hw_pg_ver, NULL);
-
-static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t pos, size_t count)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct wl1271 *wl = dev_get_drvdata(dev);
- ssize_t len;
- int ret;
-
- ret = mutex_lock_interruptible(&wl->mutex);
- if (ret < 0)
- return -ERESTARTSYS;
-
- /* Let only one thread read the log at a time, blocking others */
- while (wl->fwlog_size == 0) {
- DEFINE_WAIT(wait);
-
- prepare_to_wait_exclusive(&wl->fwlog_waitq,
- &wait,
- TASK_INTERRUPTIBLE);
-
- if (wl->fwlog_size != 0) {
- finish_wait(&wl->fwlog_waitq, &wait);
- break;
- }
-
- mutex_unlock(&wl->mutex);
-
- schedule();
- finish_wait(&wl->fwlog_waitq, &wait);
-
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- ret = mutex_lock_interruptible(&wl->mutex);
- if (ret < 0)
- return -ERESTARTSYS;
- }
-
- /* Check if the fwlog is still valid */
- if (wl->fwlog_size < 0) {
- mutex_unlock(&wl->mutex);
- return 0;
- }
-
- /* Seeking is not supported - old logs are not kept. Disregard pos. */
- len = min(count, (size_t)wl->fwlog_size);
- wl->fwlog_size -= len;
- memcpy(buffer, wl->fwlog, len);
-
- /* Make room for new messages */
- memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
-
- mutex_unlock(&wl->mutex);
-
- return len;
-}
-
-static struct bin_attribute fwlog_attr = {
- .attr = {.name = "fwlog", .mode = S_IRUSR},
- .read = wl1271_sysfs_read_fwlog,
-};
-
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
int i;
@@ -5827,8 +5700,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
return 0;
}
-#define WL1271_DEFAULT_CHANNEL 0
-
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
u32 mbox_size)
{
@@ -5881,7 +5752,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
goto err_hw;
}
- wl->channel = WL1271_DEFAULT_CHANNEL;
+ wl->channel = 0;
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->band = IEEE80211_BAND_2GHZ;
@@ -5988,11 +5859,8 @@ int wlcore_free_hw(struct wl1271 *wl)
wake_up_interruptible_all(&wl->fwlog_waitq);
mutex_unlock(&wl->mutex);
- device_remove_bin_file(wl->dev, &fwlog_attr);
-
- device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+ wlcore_sysfs_free(wl);
- device_remove_file(wl->dev, &dev_attr_bt_coex_state);
kfree(wl->buffer_32);
kfree(wl->mbox);
free_page((unsigned long)wl->fwlog);
@@ -6018,6 +5886,15 @@ int wlcore_free_hw(struct wl1271 *wl)
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support wlcore_wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY,
+ .n_patterns = WL1271_MAX_RX_FILTERS,
+ .pattern_min_len = 1,
+ .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
+};
+#endif
+
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
struct wl1271 *wl = context;
@@ -6071,14 +5948,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
if (!ret) {
wl->irq_wake_enabled = true;
device_init_wakeup(wl->dev, 1);
- if (pdata->pwr_in_suspend) {
- wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
- wl->hw->wiphy->wowlan.n_patterns =
- WL1271_MAX_RX_FILTERS;
- wl->hw->wiphy->wowlan.pattern_min_len = 1;
- wl->hw->wiphy->wowlan.pattern_max_len =
- WL1271_RX_FILTER_MAX_PATTERN_SIZE;
- }
+ if (pdata->pwr_in_suspend)
+ wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
}
#endif
disable_irq(wl->irq);
@@ -6101,36 +5972,13 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
if (ret)
goto out_irq;
- /* Create sysfs file to control bt coex state */
- ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file bt_coex_state");
+ ret = wlcore_sysfs_init(wl);
+ if (ret)
goto out_unreg;
- }
-
- /* Create sysfs file to get HW PG version */
- ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file hw_pg_ver");
- goto out_bt_coex_state;
- }
-
- /* Create sysfs file for the FW log */
- ret = device_create_bin_file(wl->dev, &fwlog_attr);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file fwlog");
- goto out_hw_pg_ver;
- }
wl->initialized = true;
goto out;
-out_hw_pg_ver:
- device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
-
-out_bt_coex_state:
- device_remove_file(wl->dev, &dev_attr_bt_coex_state);
-
out_unreg:
wl1271_unregister_hw(wl);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 9654577efd01..98066d40c2ad 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -110,7 +110,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
DECLARE_COMPLETION_ONSTACK(compl);
unsigned long flags;
int ret;
- u32 start_time = jiffies;
+ unsigned long start_time = jiffies;
bool pending = false;
/*
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index e26447832683..1b0cd98e35f1 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -434,19 +434,7 @@ static struct spi_driver wl1271_spi_driver = {
.remove = wl1271_remove,
};
-static int __init wl1271_init(void)
-{
- return spi_register_driver(&wl1271_spi_driver);
-}
-
-static void __exit wl1271_exit(void)
-{
- spi_unregister_driver(&wl1271_spi_driver);
-}
-
-module_init(wl1271_init);
-module_exit(wl1271_exit);
-
+module_spi_driver(wl1271_spi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
new file mode 100644
index 000000000000..8e583497940d
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -0,0 +1,216 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "wlcore.h"
+#include "debug.h"
+#include "ps.h"
+#include "sysfs.h"
+
+static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+
+ len = PAGE_SIZE;
+
+ mutex_lock(&wl->mutex);
+ len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
+ wl->sg_enabled);
+ mutex_unlock(&wl->mutex);
+
+ return len;
+
+}
+
+static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ unsigned long res;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &res);
+ if (ret < 0) {
+ wl1271_warning("incorrect value written to bt_coex_mode");
+ return count;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ res = !!res;
+
+ if (res == wl->sg_enabled)
+ goto out;
+
+ wl->sg_enabled = res;
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl1271_acx_sg_enable(wl, wl->sg_enabled);
+ wl1271_ps_elp_sleep(wl);
+
+ out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
+ wl1271_sysfs_show_bt_coex_state,
+ wl1271_sysfs_store_bt_coex_state);
+
+static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+
+ len = PAGE_SIZE;
+
+ mutex_lock(&wl->mutex);
+ if (wl->hw_pg_ver >= 0)
+ len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
+ else
+ len = snprintf(buf, len, "n/a\n");
+ mutex_unlock(&wl->mutex);
+
+ return len;
+}
+
+static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
+ wl1271_sysfs_show_hw_pg_ver, NULL);
+
+static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+ int ret;
+
+ ret = mutex_lock_interruptible(&wl->mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
+ /* Let only one thread read the log at a time, blocking others */
+ while (wl->fwlog_size == 0) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait_exclusive(&wl->fwlog_waitq,
+ &wait,
+ TASK_INTERRUPTIBLE);
+
+ if (wl->fwlog_size != 0) {
+ finish_wait(&wl->fwlog_waitq, &wait);
+ break;
+ }
+
+ mutex_unlock(&wl->mutex);
+
+ schedule();
+ finish_wait(&wl->fwlog_waitq, &wait);
+
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ ret = mutex_lock_interruptible(&wl->mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+ }
+
+ /* Check if the fwlog is still valid */
+ if (wl->fwlog_size < 0) {
+ mutex_unlock(&wl->mutex);
+ return 0;
+ }
+
+ /* Seeking is not supported - old logs are not kept. Disregard pos. */
+ len = min(count, (size_t)wl->fwlog_size);
+ wl->fwlog_size -= len;
+ memcpy(buffer, wl->fwlog, len);
+
+ /* Make room for new messages */
+ memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
+
+ mutex_unlock(&wl->mutex);
+
+ return len;
+}
+
+static struct bin_attribute fwlog_attr = {
+ .attr = {.name = "fwlog", .mode = S_IRUSR},
+ .read = wl1271_sysfs_read_fwlog,
+};
+
+int wlcore_sysfs_init(struct wl1271 *wl)
+{
+ int ret;
+
+ /* Create sysfs file to control bt coex state */
+ ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file bt_coex_state");
+ goto out;
+ }
+
+ /* Create sysfs file to get HW PG version */
+ ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file hw_pg_ver");
+ goto out_bt_coex_state;
+ }
+
+ /* Create sysfs file for the FW log */
+ ret = device_create_bin_file(wl->dev, &fwlog_attr);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file fwlog");
+ goto out_hw_pg_ver;
+ }
+
+ goto out;
+
+out_hw_pg_ver:
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+out_bt_coex_state:
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+
+out:
+ return ret;
+}
+
+void wlcore_sysfs_free(struct wl1271 *wl)
+{
+ device_remove_bin_file(wl->dev, &fwlog_attr);
+
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+}
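[Editor's note: the two plain device attributes moved into the new sysfs.c could also be registered through a single attribute group, while the fwlog binary attribute would still need device_create_bin_file(). A hedged sketch of that alternative; this is not what the patch does, and the identifiers are illustrative:]

#include <linux/device.h>
#include <linux/sysfs.h>

static struct attribute *wlcore_dev_attrs[] = {
	&dev_attr_bt_coex_state.attr,
	&dev_attr_hw_pg_ver.attr,
	NULL,
};

static const struct attribute_group wlcore_dev_attr_group = {
	.attrs = wlcore_dev_attrs,
};

/* Possible alternative bodies for the two new wrappers: */
static int example_sysfs_init(struct wl1271 *wl)
{
	/* creates bt_coex_state and hw_pg_ver in one call */
	return sysfs_create_group(&wl->dev->kobj, &wlcore_dev_attr_group);
}

static void example_sysfs_free(struct wl1271 *wl)
{
	sysfs_remove_group(&wl->dev->kobj, &wlcore_dev_attr_group);
}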
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.h b/drivers/net/wireless/ti/wlcore/sysfs.h
new file mode 100644
index 000000000000..c1488921839d
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/sysfs.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __SYSFS_H__
+#define __SYSFS_H__
+
+int wlcore_sysfs_init(struct wl1271 *wl);
+void wlcore_sysfs_free(struct wl1271 *wl);
+
+#endif
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 004d02e71f01..7e93fe63a2c7 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -386,7 +386,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
- if (unlikely(is_wep && wlvif->default_key != idx)) {
+ if (WARN_ON(is_wep && wlvif->default_key != idx)) {
ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
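[Editor's note on the tx.c hunk above: WARN_ON() still evaluates and returns its condition, so the recovery path that re-programs the default WEP key behaves as before; the change only adds a logged backtrace when the unexpected state is hit, whereas unlikely() was purely a branch-prediction hint. A minimal, hypothetical illustration of the idiom:]

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/errno.h>

static int example_check_state(bool unexpected)
{
	/*
	 * WARN_ON() returns the value of its condition, so this both
	 * prints a stack trace when the state is unexpected and still
	 * enters the branch that handles (or rejects) that state.
	 */
	if (WARN_ON(unexpected))
		return -EINVAL;

	return 0;
}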