author     Linus Torvalds <torvalds@linux-foundation.org>   2018-08-16 00:04:25 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-08-16 00:04:25 +0200
commit     9a76aba02a37718242d7cdc294f0a3901928aa57 (patch)
tree       2040d038f85d2120f21af83b0793efd5af1864e3 /drivers/net/wireless
parent     x86: i8259: Add missing include file (diff)
parent     bpf: test: fix spelling mistake "REUSEEPORT" -> "REUSEPORT" (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:
  - Gustavo A. R. Silva keeps working on the implicit switch fallthru changes.
  - Support 802.11ax High-Efficiency wireless in cfg80211 et al, from Luca Coelho.
  - Re-enable ASPM in r8169, from Kai-Heng Feng.
  - Add virtual XFRM interfaces, which avoids all of the limitations of existing IPSEC tunnels. From Steffen Klassert.
  - Convert GRO over to use a hash table, so that when we have many flows active we don't traverse a long list during accumulation.
  - Many new self tests for routing, TC, tunnels, etc. Too many contributors to mention them all, but I'm really happy to keep seeing this stuff.
  - Hardware timestamping support for dpaa_eth/fsl-fman from Yangbo Lu.
  - Lots of cleanups and fixes in L2TP code from Guillaume Nault.
  - Add IPSEC offload support to netdevsim, from Shannon Nelson.
  - Add support for slotting with non-uniform distribution to netem packet scheduler, from Yousuk Seung.
  - Add UDP GSO support to mlx5e, from Boris Pismenny.
  - Support offloading of Team LAG in NFP, from John Hurley.
  - Allow to configure TX queue selection based upon RX queue, from Amritha Nambiar.
  - Support ethtool ring size configuration in aquantia, from Anton Mikaev.
  - Support DSCP and flowlabel per-transport in SCTP, from Xin Long.
  - Support list based batching and stack traversal of SKBs, this is very exciting work. From Edward Cree.
  - Busyloop optimizations in vhost_net, from Toshiaki Makita.
  - Introduce the ETF qdisc, which allows time based transmissions. IGB can offload this in hardware. From Vinicius Costa Gomes.
  - Add parameter support to devlink, from Moshe Shemesh.
  - Several multiplication and division optimizations for BPF JIT in nfp driver, from Jiong Wang.
  - Lots of preparatory work to make more of the packet scheduler layer lockless, when possible, from Vlad Buslov.
  - Add ACK filter and NAT awareness to sch_cake packet scheduler, from Toke Høiland-Jørgensen.
  - Support regions and region snapshots in devlink, from Alex Vesker.
  - Allow to attach XDP programs to both HW and SW at the same time on a given device, with initial support in nfp. From Jakub Kicinski.
  - Add TLS RX offload and support in mlx5, from Ilya Lesokhin.
  - Use PHYLIB in r8169 driver, from Heiner Kallweit.
  - All sorts of changes to support Spectrum 2 in mlxsw driver, from Ido Schimmel.
  - PTP support in mv88e6xxx DSA driver, from Andrew Lunn.
  - Make TCP_USER_TIMEOUT socket option more accurate, from Jon Maxwell.
  - Support for templates in packet scheduler classifier, from Jiri Pirko.
  - IPV6 support in RDS, from Ka-Cheong Poon.
  - Native tproxy support in nf_tables, from Máté Eckl.
  - Maintain IP fragment queue in an rbtree, but optimize properly for in-order frags. From Peter Oskolkov.
  - Improved handling of ACKs on hole repairs, from Yuchung Cheng"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1996 commits)
  bpf: test: fix spelling mistake "REUSEEPORT" -> "REUSEPORT"
  hv/netvsc: Fix NULL dereference at single queue mode fallback
  net: filter: mark expected switch fall-through
  xen-netfront: fix warn message as irq device name has '/'
  cxgb4: Add new T5 PCI device ids 0x50af and 0x50b0
  net: dsa: mv88e6xxx: missing unlock on error path
  rds: fix building with IPV6=m
  inet/connection_sock: prefer _THIS_IP_ to current_text_addr
  net: dsa: mv88e6xxx: bitwise vs logical bug
  net: sock_diag: Fix spectre v1 gadget in __sock_diag_cmd()
  ieee802154: hwsim: using right kind of iteration
  net: hns3: Add vlan filter setting by ethtool command -K
  net: hns3: Set tx ring' tc info when netdev is up
  net: hns3: Remove tx ring BD len register in hns3_enet
  net: hns3: Fix desc num set to default when setting channel
  net: hns3: Fix for phy link issue when using marvell phy driver
  net: hns3: Fix for information of phydev lost problem when down/up
  net: hns3: Fix for command format parsing error in hclge_is_all_function_id_zero
  net: hns3: Add support for serdes loopback selftest
  bnxt_en: take coredump_record structure off stack
  ...
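The "expected switch fall-through" work mentioned in the highlights (and in the "net: filter: mark expected switch fall-through" commit above) is the same convention the ath10k hunks below adopt when they change "/* pass through */" to "/* fall through */": GCC's -Wimplicit-fallthrough only stays quiet when it sees such a marker comment (or a fallthrough attribute) right before the next case label. A minimal standalone sketch of the idea, not taken from this patch; the enum and the flag bits are made up for illustration:

#include <stdio.h>

enum frame_type { FRAME_RAW, FRAME_NATIVE_WIFI, FRAME_ETHERNET };

/* RAW and NATIVE_WIFI deliberately share the ETHERNET handling; the marker
 * comment tells -Wimplicit-fallthrough that the missing "break" is intended.
 */
static unsigned int frame_flags(enum frame_type t)
{
	unsigned int flags = 0;

	switch (t) {
	case FRAME_RAW:
	case FRAME_NATIVE_WIFI:
		flags |= 0x1;	/* hypothetical "MAC header present" bit */
		/* fall through */
	case FRAME_ETHERNET:
		flags |= 0x2;	/* hypothetical "needs frag descriptor" bit */
		break;
	}
	return flags;
}

int main(void)
{
	printf("raw: 0x%x, ethernet: 0x%x\n",
	       frame_flags(FRAME_RAW), frame_flags(FRAME_ETHERNET));
	return 0;
}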
Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig24
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h42
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c70
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c79
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h42
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c47
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c85
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c101
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h23
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c67
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c30
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c95
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c80
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c847
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c289
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c425
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c73
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c129
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c53
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h59
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c708
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h112
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c1608
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h568
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h316
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c723
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h824
-rw-r--r--drivers/net/wireless/atmel/atmel.c18
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c48
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c2
-rw-r--r--drivers/net/wireless/cisco/airo.c8
-rw-r--r--drivers/net/wireless/cisco/airo_cs.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c25
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.h12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-debug.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c163
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/common_rx.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c284
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h286
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c211
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c364
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c207
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c388
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c92
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c17
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c123
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c12
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c30
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c7
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c95
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c36
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c55
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c25
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig27
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile20
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c77
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h174
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Makefile7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/core.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.c522
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.h126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c445
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c720
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h282
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h772
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.c658
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.h154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c403
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c656
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h101
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h330
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c1008
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.h81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/regs.h651
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.h313
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/tx.c270
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c381
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/util.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_common.c350
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c377
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init.c305
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c259
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.c641
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c699
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_main.c326
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy.c360
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c349
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_regs.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx.c161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_usb.c142
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u.h83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_core.c108
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_init.c318
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c240
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_main.c185
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c463
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c303
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c845
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_mcu.c242
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h71
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c11
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c103
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c211
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c26
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h105
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c18
-rw-r--r--drivers/net/wireless/ray_cs.c6
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c180
-rw-r--r--drivers/net/wireless/rndis_wlan.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c38
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c23
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h3
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c29
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c90
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c538
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c146
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c30
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h1
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_chip.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c21
285 files changed, 25753 insertions, 4805 deletions
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 84f071ac0d84..54ff5930126c 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -1,15 +1,15 @@
config ATH10K
- tristate "Atheros 802.11ac wireless cards support"
- depends on MAC80211 && HAS_DMA
+ tristate "Atheros 802.11ac wireless cards support"
+ depends on MAC80211 && HAS_DMA
select ATH_COMMON
select CRC32
select WANT_DEV_COREDUMP
select ATH10K_CE
- ---help---
- This module adds support for wireless adapters based on
- Atheros IEEE 802.11ac family of chipsets.
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros IEEE 802.11ac family of chipsets.
- If you choose to build a module, it'll be called ath10k.
+ If you choose to build a module, it'll be called ath10k.
config ATH10K_CE
bool
@@ -41,12 +41,12 @@ config ATH10K_USB
work in progress and will not fully work.
config ATH10K_SNOC
- tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
- depends on ATH10K && ARCH_QCOM
- ---help---
- This module adds support for integrated WCN3990 chip connected
- to system NOC(SNOC). Currently work in progress and will not
- fully work.
+ tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
+ depends on ATH10K && ARCH_QCOM
+ ---help---
+ This module adds support for integrated WCN3990 chip connected
+ to system NOC(SNOC). Currently work in progress and will not
+ fully work.
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index fa39ffffd34d..c9bd0e2b5db7 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -133,11 +133,8 @@ static void ath10k_ahb_clock_deinit(struct ath10k *ar)
static int ath10k_ahb_clock_enable(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
- struct device *dev;
int ret;
- dev = &ar_ahb->pdev->dev;
-
if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) ||
IS_ERR_OR_NULL(ar_ahb->ref_clk) ||
IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
@@ -451,12 +448,10 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct platform_device *pdev;
- struct device *dev;
struct resource *res;
int ret;
pdev = ar_ahb->pdev;
- dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 3b96a43fbda4..18c709c484e7 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1512,7 +1512,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
if (ret) {
dma_free_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
+ (nentries * sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space_unaligned,
base_addr);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index dbeffaef6024..b8fb5382dede 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -383,4 +383,46 @@ static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
return CE_INTERRUPT_SUMMARY;
}
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
+
#endif /* _CE_H_ */
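The block added above moves the Copy Engine pipe-configuration structures and the PIPEDIR_* direction constants into ce.h, apparently so that non-PCI targets can share them (the pci.h and snoc.h hunks later in this diff drop the old copies). As a rough userspace-style sketch of how a service-to-pipe table is consulted, using plain uint32_t in place of the kernel's __le32 and entirely hypothetical service IDs and pipe numbers:

#include <stdint.h>
#include <stdio.h>

#define PIPEDIR_NONE	0
#define PIPEDIR_IN	1	/* Target --> Host (Rx) */
#define PIPEDIR_OUT	2	/* Host --> Target (Tx) */
#define PIPEDIR_INOUT	3

/* Host-byte-order mirror of the service_to_pipe layout defined above;
 * the real struct uses __le32 fields shared with target firmware.
 */
struct service_to_pipe {
	uint32_t service_id;
	uint32_t pipedir;
	uint32_t pipenum;
};

/* Made-up mapping, for illustration only. */
static const struct service_to_pipe svc_map[] = {
	{ .service_id = 0x100, .pipedir = PIPEDIR_OUT, .pipenum = 3 },
	{ .service_id = 0x100, .pipedir = PIPEDIR_IN,  .pipenum = 2 },
};

static int lookup_pipe(uint32_t service_id, uint32_t dir, uint32_t *pipenum)
{
	for (size_t i = 0; i < sizeof(svc_map) / sizeof(svc_map[0]); i++) {
		if (svc_map[i].service_id == service_id &&
		    svc_map[i].pipedir == dir) {
			*pipenum = svc_map[i].pipenum;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	uint32_t pipe;

	if (!lookup_pipe(0x100, PIPEDIR_OUT, &pipe))
		printf("service 0x100 transmits on CE pipe %u\n", pipe);
	return 0;
}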
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ad4f6e3c0737..c40cd129afe7 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -41,10 +41,8 @@ static bool uart_print;
static bool skip_otp;
static bool rawmode;
-/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
- * by default.
- */
-unsigned long ath10k_coredump_mask = 0x3;
+unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
+ BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);
/* FIXME: most of these should be readonly */
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
@@ -82,6 +80,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -113,6 +112,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -145,6 +145,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -176,6 +177,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -207,6 +209,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -238,6 +241,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -272,6 +276,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -309,6 +314,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
@@ -347,6 +353,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
/* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz
* or 2x2 160Mhz, long-guard-interval.
@@ -388,6 +395,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
/* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or
* 1x1 160Mhz, long-guard-interval.
@@ -423,6 +431,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -456,6 +465,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -494,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
@@ -2084,6 +2095,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->max_num_peers = TARGET_10_4_NUM_PEERS;
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 951dbdd1c9eb..9feea02e7d37 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -48,7 +48,8 @@
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
-#define ATH10K_NUM_CHANS 40
+#define ATH10K_NUM_CHANS 41
+#define ATH10K_MAX_5G_CHAN 173
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
@@ -185,6 +186,11 @@ struct ath10k_wmi {
const struct wmi_ops *ops;
const struct wmi_peer_flags_map *peer_flags;
+ u32 mgmt_max_num_pending_tx;
+
+ /* Protected by data_lock */
+ struct idr mgmt_pending_tx;
+
u32 num_mem_chunks;
u32 rx_decap_mode;
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0d98c93a3aba..0baaad90b8d1 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1727,7 +1727,9 @@ int ath10k_debug_start(struct ath10k *ar)
ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
}
- if (ar->debug.nf_cal_period) {
+ if (ar->debug.nf_cal_period &&
+ !test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
ret = ath10k_wmi_pdev_set_param(ar,
ar->wmi.pdev_param->cal_period,
ar->debug.nf_cal_period);
@@ -1744,7 +1746,9 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
- ath10k_debug_cal_data_fetch(ar);
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features))
+ ath10k_debug_cal_data_fetch(ar);
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
@@ -2293,6 +2297,52 @@ static const struct file_operations fops_tpc_stats_final = {
.llseek = default_llseek,
};
+static ssize_t ath10k_write_warm_hw_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ bool val;
+
+ if (kstrtobool_from_user(user_buf, count, &val))
+ return -EFAULT;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map)))
+ ath10k_warn(ar, "wmi service for reset chip is not available\n");
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset,
+ WMI_RST_MODE_WARM_RESET);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to enable warm hw reset: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_warm_hw_reset = {
+ .write = ath10k_write_warm_hw_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@@ -2367,15 +2417,18 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar,
&fops_fw_dbglog);
- debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
- &fops_cal_data);
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
+ &fops_cal_data);
+
+ debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
+ &fops_nf_cal_period);
+ }
debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar,
&fops_ani_enable);
- debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
- &fops_nf_cal_period);
-
if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy,
ar, &fops_simulate_radar);
@@ -2418,6 +2471,9 @@ int ath10k_debug_register(struct ath10k *ar)
ar->debug.debugfs_phy, ar,
&fops_tpc_stats_final);
+ debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
+ &fops_warm_hw_reset);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 8902720b4e49..331b8d558791 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -274,7 +274,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
- if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
+ if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
@@ -655,7 +655,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
htc->max_msgs_per_htc_bundle =
min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
- HTC_HOST_MAX_MSG_PER_BUNDLE);
+ HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Extended ready message. RX bundle size: %d\n",
htc->max_msgs_per_htc_bundle);
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 34877597dd6a..51fda6c23f69 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -50,7 +50,8 @@ struct ath10k;
* 4-byte aligned.
*/
-#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@@ -58,6 +59,7 @@ enum ath10k_htc_tx_flags {
};
enum ath10k_htc_rx_flags {
+ ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index c72d8af122a2..4d1cd90d6d27 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -268,11 +268,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
- spin_unlock_bh(&htt->rx_ring.lock);
if (ret)
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
return ret;
}
@@ -284,7 +285,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
+ spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
ath10k_htt_get_rx_ring_size(htt),
@@ -1089,7 +1092,7 @@ static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
- __skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+ skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
@@ -2810,7 +2813,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
- __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+ skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@@ -2874,7 +2877,7 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
if (skb_queue_empty(&ar->htt.rx_msdus_q))
break;
- skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+ skb = skb_dequeue(&ar->htt.rx_msdus_q);
if (!skb)
break;
ath10k_process_rx(ar, skb);
@@ -2905,7 +2908,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
goto exit;
}
- while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
+ while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 5d8b97a0ccaa..7cff0d52338f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -208,10 +208,10 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
struct ath10k *ar = htt->ar;
int ret;
- lockdep_assert_held(&htt->tx_lock);
-
+ spin_lock_bh(&htt->tx_lock);
ret = idr_alloc(&htt->pending_tx, skb, 0,
htt->max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
@@ -1056,7 +1056,7 @@ static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
return HTT_DATA_TX_EXT_TID_MGMT;
else if (cb->flags & ATH10K_SKB_F_QOS)
- return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
else
return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
@@ -1077,9 +1077,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
- spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
- spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
@@ -1161,9 +1159,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
struct htt_msdu_ext_desc *ext_desc = NULL;
struct htt_msdu_ext_desc *ext_desc_t = NULL;
- spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
- spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
@@ -1202,7 +1198,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* pass through */
+ /* fall through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_32;
@@ -1363,9 +1359,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
struct htt_msdu_ext_desc_64 *ext_desc = NULL;
struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
- spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
- spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
@@ -1404,7 +1398,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* pass through */
+ /* fall through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_64;
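The TID change above (bitwise AND instead of modulo with the QoS TID mask) matters because, with a mask of 0x7, the modulo form wraps priority 7 back to TID 0 while the AND keeps the full 0..7 range. A small standalone check, assuming 0x7 as the value of mac80211's IEEE80211_QOS_CTL_TID_MASK (the definition is not shown in this hunk):

#include <stdio.h>

#define QOS_TID_MASK 0x7	/* assumed value of IEEE80211_QOS_CTL_TID_MASK */

int main(void)
{
	for (unsigned int prio = 0; prio <= 7; prio++)
		printf("priority %u: old (%% mask) -> %u, new (& mask) -> %u\n",
		       prio, prio % QOS_TID_MASK, prio & QOS_TID_MASK);
	/* priority 7 exposes the bug: 7 % 7 == 0, but the intended TID is 7 */
	return 0;
}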
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 23467e9fefeb..977f79ebb4fd 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -586,6 +586,9 @@ struct ath10k_hw_params {
/* target supporting retention restore on ddr */
bool rri_on_ddr;
+
+ /* Number of bytes to be the offset for each FFT sample */
+ int spectral_bin_offset;
};
struct htt_rx_desc;
@@ -696,6 +699,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
+#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
/* Target specific defines for WMI-HL-1.0 firmware */
#define TARGET_HL_10_TLV_NUM_PEERS 14
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 836e0a47b94a..90f9372dec25 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -101,6 +101,8 @@ static struct ieee80211_rate ath10k_rates_rev2[] = {
#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
+#define ath10k_wmi_legacy_rates ath10k_rates
+
static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -3085,6 +3087,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
passive = channel->flags & IEEE80211_CHAN_NO_IR;
ch->passive = passive;
+ /* the firmware is ignoring the "radar" flag of the
+ * channel and is scanning actively using Probe Requests
+ * on "Radar detection"/DFS channels which are not
+ * marked as "available"
+ */
+ ch->passive |= ch->chan_radar;
+
ch->freq = channel->center_freq;
ch->band_center_freq1 = channel->center_freq;
ch->min_power = 0;
@@ -4026,7 +4035,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
drv_priv);
/* Prevent aggressive sta/tid taking over tx queue */
- max = 16;
+ max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
ret = 0;
while (ath10k_mac_tx_can_push(hw, txq) && max--) {
ret = ath10k_mac_tx_push_txq(hw, txq);
@@ -4047,6 +4056,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
rcu_read_unlock();
spin_unlock_bh(&ar->txqs_lock);
}
+EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
/************/
/* Scanning */
@@ -4287,7 +4297,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
struct ieee80211_txq *f_txq;
struct ath10k_txq *f_artxq;
int ret = 0;
- int max = 16;
+ int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))
@@ -5438,8 +5448,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
- int ret = 0;
+ struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
+ u16 bitrate, hw_value;
+ u8 rate;
+ int rateidx, ret = 0;
+ enum nl80211_band band;
mutex_lock(&ar->conf_mutex);
@@ -5607,6 +5621,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
+ band = def.chan->band;
+ rateidx = vif->bss_conf.mcast_rate[band] - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate;
+ hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value;
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH10K_HW_RATECODE(hw_value, 0, preamble);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = ar->wmi.vdev_param->mcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = ar->wmi.vdev_param->bcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -6062,13 +6114,13 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
mode = chan_to_phymode(&def);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
- sta->addr, bw, mode);
+ sta->addr, bw, mode);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_PHYMODE, mode);
+ WMI_PEER_PHYMODE, mode);
if (err) {
ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
- sta->addr, mode, err);
+ sta->addr, mode, err);
goto exit;
}
@@ -6934,7 +6986,6 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
const struct cfg80211_bitrate_mask *mask,
u8 *rate, u8 *nss)
{
- struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
int rate_idx;
int i;
u16 bitrate;
@@ -6944,8 +6995,11 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
if (hweight32(mask->control[band].legacy) == 1) {
rate_idx = ffs(mask->control[band].legacy) - 1;
- hw_rate = sband->bitrates[rate_idx].hw_value;
- bitrate = sband->bitrates[rate_idx].bitrate;
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
+ bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
if (ath10k_mac_bitrate_is_cck(bitrate))
preamble = WMI_RATE_PREAMBLE_CCK;
@@ -7737,7 +7791,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
return;
sinfo->rx_duration = arsta->rx_duration;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
if (!arsta->txrate.legacy && !arsta->txrate.nss)
return;
@@ -7750,7 +7804,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
sinfo->txrate.bw = arsta->txrate.bw;
}
sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
static const struct ieee80211_ops ath10k_ops = {
@@ -7870,6 +7924,9 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
CHAN5G(161, 5805, 0),
CHAN5G(165, 5825, 0),
CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+ /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */
+ /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */
};
struct ath10k *ath10k_mac_create(size_t priv_size)
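The new CHAN5G(173, 5865, 0) entry lines up with the core.h change earlier in this diff (ATH10K_NUM_CHANS bumped to 41 and ATH10K_MAX_5G_CHAN set to 173), since 5 GHz channel centers follow 5000 + 5 * channel-number MHz. A trivial standalone check of that arithmetic:

#include <stdio.h>

/* 5 GHz channel center frequency in MHz: 5000 + 5 * channel number. */
static unsigned int chan5g_center_mhz(unsigned int chan)
{
	return 5000 + 5 * chan;
}

int main(void)
{
	printf("channel 173 -> %u MHz\n", chan5g_center_mhz(173));	/* 5865 */
	return 0;
}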
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index e52fd83156b6..0ed436657108 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -86,48 +86,6 @@ struct pcie_state {
/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
-/* Host software's Copy Engine configuration. */
-#define CE_ATTR_FLAGS 0
-
-/*
- * Configuration information for a Copy Engine pipe.
- * Passed from Host to Target during startup (one per CE).
- *
- * NOTE: Structure is shared between Host software and Target firmware!
- */
-struct ce_pipe_config {
- __le32 pipenum;
- __le32 pipedir;
- __le32 nentries;
- __le32 nbytes_max;
- __le32 flags;
- __le32 reserved;
-};
-
-/*
- * Directions for interconnect pipe configuration.
- * These definitions may be used during configuration and are shared
- * between Host and Target.
- *
- * Pipe Directions are relative to the Host, so PIPEDIR_IN means
- * "coming IN over air through Target to Host" as with a WiFi Rx operation.
- * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
- * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
- * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
- * over the interconnect.
- */
-#define PIPEDIR_NONE 0
-#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
-#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
-#define PIPEDIR_INOUT 3 /* bidirectional */
-
-/* Establish a mapping between a service/direction and a pipe. */
-struct service_to_pipe {
- __le32 service_id;
- __le32 pipedir;
- __le32 pipenum;
-};
-
/* Per-pipe state. */
struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index d612ce8c9cff..7f61591ce0de 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -30,6 +30,7 @@
#include "debug.h"
#include "hif.h"
#include "htc.h"
+#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
@@ -396,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
int ret;
payload_len = le16_to_cpu(htc_hdr->len);
+ skb->len = payload_len + sizeof(struct ath10k_htc_hdr);
if (trailer_present) {
trailer = skb->data + sizeof(*htc_hdr) +
@@ -434,12 +436,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
+ int lookahead_idx = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
lookaheads_local = lookaheads;
n_lookahead_local = n_lookahead;
- id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
+ id = ((struct ath10k_htc_hdr *)
+ &lookaheads[lookahead_idx++])->eid;
if (id >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
@@ -462,6 +466,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
/* Only read lookahead's from RX trailers
* for the last packet in a bundle.
*/
+ lookahead_idx--;
lookaheads_local = NULL;
n_lookahead_local = NULL;
}
@@ -505,11 +510,11 @@ static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
*bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
- if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) {
+ if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
ath10k_warn(ar,
"HTC bundle length %u exceeds maximum %u\n",
le16_to_cpu(htc_hdr->len),
- HTC_HOST_MAX_MSG_PER_BUNDLE);
+ HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
return -ENOMEM;
}
@@ -600,6 +605,9 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
* ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
* packet skb's have been allocated in the previous step.
*/
+ if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
+ full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
+
ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
act_len,
full_len,
@@ -1342,6 +1350,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
break;
} while (time_before(jiffies, timeout) && !done);
+ ath10k_mac_tx_push_pending(ar);
+
sdio_claim_host(ar_sdio->func);
if (ret && ret != -ECANCELED)
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index 4ff7b545293b..453eb6263143 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -96,14 +96,14 @@
* way:
*
* Let's assume that each packet in a bundle of the maximum bundle size
- * (HTC_HOST_MAX_MSG_PER_BUNDLE) has the HTC header bundle count set
- * to the maximum value (HTC_HOST_MAX_MSG_PER_BUNDLE).
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
+ * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
*
* in this case the driver must allocate
- * (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) skb's.
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
*/
#define ATH10K_SDIO_MAX_RX_MSGS \
- (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE)
+ (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index a3a7042fe13a..fa1843a7e0fd 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -14,19 +14,20 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/module.h>
+#include <linux/clk.h>
#include <linux/kernel.h>
-#include "debug.h"
-#include "hif.h"
-#include "htc.h"
-#include "ce.h"
-#include "snoc.h"
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/clk.h>
-#define WCN3990_CE_ATTR_FLAGS 0
+
+#include "ce.h"
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "snoc.h"
+
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4
@@ -449,7 +450,7 @@ static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
- struct ath10k_pci *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
+ struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
struct ath10k *ar = ar_snoc->ar;
ath10k_snoc_rx_post(ar);
@@ -820,7 +821,7 @@ static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
.write32 = ath10k_snoc_write32,
};
-int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
+static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int i;
@@ -868,7 +869,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
return done;
}
-void ath10k_snoc_init_napi(struct ath10k *ar)
+static void ath10k_snoc_init_napi(struct ath10k *ar)
{
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
ATH10K_NAPI_BUDGET);
@@ -1303,13 +1304,13 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
ar->ce_priv = &ar_snoc->ce;
- ath10k_snoc_resource_init(ar);
+ ret = ath10k_snoc_resource_init(ar);
if (ret) {
ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
goto err_core_destroy;
}
- ath10k_snoc_setup_resource(ar);
+ ret = ath10k_snoc_setup_resource(ar);
if (ret) {
ath10k_warn(ar, "failed to setup resource: %d\n", ret);
goto err_core_destroy;
@@ -1388,25 +1389,7 @@ static struct platform_driver ath10k_snoc_driver = {
.of_match_table = ath10k_snoc_dt_match,
},
};
-
-static int __init ath10k_snoc_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&ath10k_snoc_driver);
- if (ret)
- pr_err("failed to register ath10k snoc driver: %d\n",
- ret);
-
- return ret;
-}
-module_init(ath10k_snoc_init);
-
-static void __exit ath10k_snoc_exit(void)
-{
- platform_driver_unregister(&ath10k_snoc_driver);
-}
-module_exit(ath10k_snoc_exit);
+module_platform_driver(ath10k_snoc_driver);
MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index 05dc98f46ccd..f9e530189d48 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -19,7 +19,6 @@
#include "hw.h"
#include "ce.h"
-#include "pci.h"
struct ath10k_snoc_drv_priv {
enum ath10k_hw_rev hw_rev;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index af6995de7e00..653b6d013207 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -145,7 +145,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
bins = (u8 *)fftr;
- bins += sizeof(*fftr);
+ bins += sizeof(*fftr) + ar->hw_params.spectral_bin_offset;
fft_sample->tsf = __cpu_to_be64(tsf);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 5ecce04005d2..7fd63bbf8e24 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -31,6 +31,8 @@ struct wmi_ops {
struct wmi_scan_ev_arg *arg);
int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg);
+ int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg);
int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
@@ -262,6 +264,16 @@ ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
+ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_tx_compl)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
+}
+
+static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
{
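The wmi-ops.h wrappers above follow the driver's usual dispatch pattern: each firmware-interface variant fills a const ops table, and the inline wrapper returns -EOPNOTSUPP when the variant does not implement a given op. A generic sketch of that shape, with hypothetical names rather than the driver's own structures:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct example_ops {
            int (*pull_event)(void *dev, const void *buf, size_t len);
    };

    struct example_dev {
            const struct example_ops *ops;
    };

    static inline int example_pull_event(struct example_dev *dev,
                                         const void *buf, size_t len)
    {
            /* Variant does not implement this event: report it as unsupported. */
            if (!dev->ops->pull_event)
                    return -EOPNOTSUPP;

            return dev->ops->pull_event(dev, buf, len);
    }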
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8c49a26fc571..cdc1e64d52ad 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -618,6 +618,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_TDLS_PEER_EVENTID:
ath10k_wmi_event_tdls_peer(ar, skb);
break;
+ case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
+ ath10k_wmi_event_mgmt_tx_compl(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -659,6 +662,31 @@ static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
return 0;
}
+static int
+ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_tx_compl_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+
+ arg->desc_id = ev->desc_id;
+ arg->status = ev->status;
+ arg->pdev_id = ev->pdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
@@ -1076,6 +1104,8 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = reg->eeprom_rd;
+ arg->low_5ghz_chan = reg->low_5ghz_chan;
+ arg->high_5ghz_chan = reg->high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = svc_bmap;
arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
@@ -1584,6 +1614,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+ cfg->wmi_send_separate = __cpu_to_le32(0);
+ cfg->num_ocb_vdevs = __cpu_to_le32(0);
+ cfg->num_ocb_channels = __cpu_to_le32(0);
+ cfg->num_ocb_schedules = __cpu_to_le32(0);
+ cfg->host_capab = __cpu_to_le32(0);
ath10k_wmi_put_host_mem_chunks(ar, chunks);
@@ -1614,10 +1649,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
ie_len = roundup(arg->ie_len, 4);
len = (sizeof(*tlv) + sizeof(*cmd)) +
- (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
- (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
- (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
- (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+ sizeof(*tlv) + chan_len +
+ sizeof(*tlv) + ssid_len +
+ sizeof(*tlv) + bssid_len +
+ sizeof(*tlv) + ie_len;
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
@@ -1785,7 +1820,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
{
struct wmi_tlv_vdev_start_cmd *cmd;
struct wmi_channel *ch;
- struct wmi_p2p_noa_descriptor *noa;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
@@ -1843,7 +1877,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = 0;
- noa = (void *)tlv->value;
/* Note: This is a nested TLV containing:
* [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
@@ -2605,6 +2638,30 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
return skb;
}
+static int
+ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
+ dma_addr_t paddr)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ int ret;
+
+ pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
+ if (!pkt_addr)
+ return -ENOMEM;
+
+ pkt_addr->vaddr = skb;
+ pkt_addr->paddr = paddr;
+
+ spin_lock_bh(&ar->data_lock);
+ ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
+ wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
+ return ret;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
dma_addr_t paddr)
@@ -2616,9 +2673,9 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
u32 buf_len = msdu->len;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ int len, desc_id;
u32 vdev_id;
void *ptr;
- int len;
if (!cb->vif)
return ERR_PTR(-EINVAL);
@@ -2649,13 +2706,17 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if (!skb)
return ERR_PTR(-ENOMEM);
+ desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
+ if (desc_id < 0)
+ goto err_free_skb;
+
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- cmd->desc_id = 0;
+ cmd->desc_id = __cpu_to_le32(desc_id);
cmd->chanfreq = 0;
cmd->buf_len = __cpu_to_le32(buf_len);
cmd->frame_len = __cpu_to_le32(msdu->len);
@@ -2672,6 +2733,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
memcpy(ptr, msdu->data, buf_len);
return skb;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+ return ERR_PTR(desc_id);
}
static struct sk_buff *
@@ -2700,7 +2765,8 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
static struct sk_buff *
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
- u32 log_level) {
+ u32 log_level)
+{
struct wmi_tlv_dbglog_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
@@ -3835,6 +3901,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+ .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
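The new mgmt-tx-by-reference path above keys each in-flight frame by an IDR-allocated descriptor id, so the later completion event can map desc_id back to the skb and its DMA address. A compressed sketch of the same IDR life cycle, assuming a simple spinlock-protected table (names are illustrative, not the driver's):

    #include <linux/idr.h>
    #include <linux/spinlock.h>
    #include <linux/gfp.h>

    static DEFINE_SPINLOCK(pending_lock);
    static DEFINE_IDR(pending_tx);

    /* Returns an id >= 0 on success, or a negative errno. */
    static int track_pending(void *desc, int max_pending)
    {
            int id;

            spin_lock_bh(&pending_lock);
            id = idr_alloc(&pending_tx, desc, 0, max_pending, GFP_ATOMIC);
            spin_unlock_bh(&pending_lock);

            return id;
    }

    /* Look up and drop the entry when the completion arrives. */
    static void *complete_pending(int id)
    {
            void *desc;

            spin_lock_bh(&pending_lock);
            desc = idr_find(&pending_tx, id);
            if (desc)
                    idr_remove(&pending_tx, id);
            spin_unlock_bh(&pending_lock);

            return desc;
    }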
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 3e1e340cd834..4f0c20c90642 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -320,6 +320,7 @@ enum wmi_tlv_event_id {
WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_TLV_MGMT_TX_COMPLETION_EVENTID,
WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
WMI_TLV_BA_RSP_SSN_EVENTID,
@@ -1573,6 +1574,17 @@ struct wmi_tlv {
u8 value[0];
} __packed;
+struct ath10k_mgmt_tx_pkt_addr {
+ void *vaddr;
+ dma_addr_t paddr;
+};
+
+struct wmi_tlv_mgmt_tx_compl_ev {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
#define WMI_TLV_MGMT_RX_NUM_RSSI 4
struct wmi_tlv_mgmt_rx_ev {
@@ -1670,6 +1682,11 @@ struct wmi_tlv_resource_config {
__le32 keep_alive_pattern_size;
__le32 max_tdls_concurrent_sleep_sta;
__le32 max_tdls_concurrent_buffer_sta;
+ __le32 wmi_send_separate;
+ __le32 num_ocb_vdevs;
+ __le32 num_ocb_channels;
+ __le32 num_ocb_schedules;
+ __le32 host_capab;
} __packed;
struct wmi_tlv_init_cmd {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index f97ab795cf2e..fd612d2905b0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1333,7 +1333,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
- .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
@@ -2313,6 +2313,59 @@ static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
return true;
}
+static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
+ u32 status)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *msdu;
+ int ret;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
+ if (!pkt_addr) {
+ ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
+ desc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_FROM_DEVICE);
+ info = IEEE80211_SKB_CB(msdu);
+ info->flags |= status;
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ ret = 0;
+
+out:
+ idr_remove(&wmi->mgmt_pending_tx, desc_id);
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+}
+
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
+ int ret;
+
+ ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
+ return ret;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id),
+ __le32_to_cpu(arg.status));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv evnt mgmt tx completion\n");
+
+ return 0;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -2366,7 +2419,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
*/
if (channel >= 1 && channel <= 14) {
status->band = NL80211_BAND_2GHZ;
- } else if (channel >= 36 && channel <= 169) {
+ } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
@@ -4602,10 +4655,6 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
ev = (struct wmi_pdev_tpc_config_event *)skb->data;
- tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
- if (!tpc_stats)
- return;
-
num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
@@ -4614,6 +4663,10 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
return;
}
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+
ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
num_tx_chain);
@@ -5018,13 +5071,11 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
void *vaddr;
pool_size = num_units * round_up(unit_len, 4);
- vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
+ vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
- memset(vaddr, 0, pool_size);
-
ar->wmi.mem_chunks[idx].vaddr = vaddr;
ar->wmi.mem_chunks[idx].paddr = paddr;
ar->wmi.mem_chunks[idx].len = pool_size;
@@ -9075,6 +9126,11 @@ int ath10k_wmi_attach(struct ath10k *ar)
INIT_WORK(&ar->radar_confirmation_work,
ath10k_radar_confirmation_work);
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ idr_init(&ar->wmi.mgmt_pending_tx);
+ }
+
return 0;
}
@@ -9093,8 +9149,35 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
ar->wmi.num_mem_chunks = 0;
}
+static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
+ void *ctx)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
+ struct ath10k *ar = ctx;
+ struct sk_buff *msdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "force cleanup mgmt msdu_id %hu\n", msdu_id);
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_FROM_DEVICE);
+ ieee80211_free_txskb(ar->hw, msdu);
+
+ return 0;
+}
+
void ath10k_wmi_detach(struct ath10k *ar)
{
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ spin_lock_bh(&ar->data_lock);
+ idr_for_each(&ar->wmi.mgmt_pending_tx,
+ ath10k_wmi_mgmt_tx_clean_up_pending, ar);
+ idr_destroy(&ar->wmi.mgmt_pending_tx);
+ spin_unlock_bh(&ar->data_lock);
+ }
+
cancel_work_sync(&ar->svc_rdy_work);
if (ar->svc_rdy_skb)
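One small cleanup in wmi.c above replaces dma_alloc_coherent() plus memset() with dma_zalloc_coherent(), which in kernels of this vintage returned already-zeroed coherent memory. A minimal before/after sketch (the device pointer and size are placeholders, not driver state):

    #include <linux/dma-mapping.h>

    /* Sketch only: one call replaces the allocate-then-memset pair. */
    static void *alloc_zeroed_pool(struct device *dev, size_t size,
                                   dma_addr_t *paddr)
    {
            return dma_zalloc_coherent(dev, size, paddr, GFP_KERNEL);
    }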
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index d68afb65402a..36220258e3c7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -462,6 +462,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS);
SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
+ SVCSTR(WMI_SERVICE_RESET_CHIP);
default:
return NULL;
}
@@ -3934,7 +3935,11 @@ enum wmi_10x_pdev_param {
WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
- WMI_10X_PDEV_PARAM_CAL_PERIOD
+ WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ WMI_10X_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_10X_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_10X_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_10X_PDEV_PARAM_PDEV_RESET
};
enum wmi_10_4_pdev_param {
@@ -6501,6 +6506,15 @@ struct wmi_force_fw_hang_cmd {
__le32 delay_ms;
} __packed;
+enum wmi_pdev_reset_mode_type {
+ WMI_RST_MODE_TX_FLUSH = 1,
+ WMI_RST_MODE_WARM_RESET,
+ WMI_RST_MODE_COLD_RESET,
+ WMI_RST_MODE_WARM_RESET_RESTORE_CAL,
+ WMI_RST_MODE_COLD_RESET_RESTORE_CAL,
+ WMI_RST_MODE_MAX,
+};
+
enum ath10k_dbglog_level {
ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
ATH10K_DBGLOG_LEVEL_INFO = 1,
@@ -6600,6 +6614,12 @@ struct wmi_scan_ev_arg {
__le32 vdev_id;
};
+struct wmi_tlv_mgmt_tx_compl_ev_arg {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
@@ -7071,6 +7091,7 @@ int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index f23c851765df..05140d8baa36 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -670,6 +670,7 @@ ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
break;
case NL80211_IFTYPE_ADHOC:
AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
+ /* fall through */
default:
/* On non-STA modes timer1 is used as next DMA
* beacon alert (DBA) timer and timer2 as next
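Several hunks in this area (ath5k, ath6kl, ath9k) only add "/* fall through */" comments. These annotate intentional switch fallthroughs so that GCC's -Wimplicit-fallthrough (at its comment-matching levels) and static checkers stay quiet. A minimal illustration with a hypothetical switch:

    enum example_mode { MODE_A, MODE_B };

    static int example(enum example_mode mode)
    {
            int flags = 0;

            switch (mode) {
            case MODE_A:
                    flags |= 1;
                    /* fall through */
            case MODE_B:
                    flags |= 2;
                    break;
            }

            return flags;
    }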
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index b1b8bc326830..ae08572c4b58 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -483,7 +483,6 @@ static u32
ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
{
u32 mix, step;
- u32 *rf;
const struct ath5k_gain_opt *go;
const struct ath5k_gain_opt_step *g_step;
const struct ath5k_rf_reg *rf_regs;
@@ -502,7 +501,6 @@ ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
if (ah->ah_rf_banks == NULL)
return 0;
- rf = ah->ah_rf_banks;
ah->ah_gain.g_f_corr = 0;
/* No VGA (Variable Gain Amplifier) override, skip */
@@ -549,13 +547,10 @@ ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
{
const struct ath5k_rf_reg *rf_regs;
u32 step, mix_ovr, level[4];
- u32 *rf;
if (ah->ah_rf_banks == NULL)
return false;
- rf = ah->ah_rf_banks;
-
if (ah->ah_radio == AR5K_RF5111) {
rf_regs = rf_regs_5111;
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index 334dbd834b3a..bde5a10d470c 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -534,7 +534,7 @@ int ath6kl_bmi_init(struct ath6kl *ar)
/* cmd + addr + len + data_size */
ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);
- ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
+ ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_KERNEL);
if (!ar->bmi.cmd_buf)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 0687697d5e2d..e121187f371f 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1811,20 +1811,20 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->target_stats.rx_byte) {
sinfo->rx_bytes = vif->target_stats.rx_byte;
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
sinfo->rx_packets = vif->target_stats.rx_pkt;
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
}
if (vif->target_stats.tx_byte) {
sinfo->tx_bytes = vif->target_stats.tx_byte;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
sinfo->tx_packets = vif->target_stats.tx_pkt;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
}
sinfo->signal = vif->target_stats.cs_rssi;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
rate = vif->target_stats.tx_ucast_rate;
@@ -1857,12 +1857,12 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
if (test_bit(CONNECTED, &vif->flags) &&
test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
vif->nw_type == INFRA_NETWORK) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
@@ -3899,16 +3899,19 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
switch (ar->hw.cap) {
case WMI_11AN_CAP:
ht = true;
+ /* fall through */
case WMI_11A_CAP:
band_5gig = true;
break;
case WMI_11GN_CAP:
ht = true;
+ /* fall through */
case WMI_11G_CAP:
band_2gig = true;
break;
case WMI_11AGN_CAP:
ht = true;
+ /* fall through */
case WMI_11AG_CAP:
band_2gig = true;
band_5gig = true;
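The ath6kl (and later wil6210) hunks switch sinfo->filled from BIT() to BIT_ULL(): the station-info "filled" bitmap is a u64, while BIT() produces an unsigned long, which is 32 bits on some architectures and would silently drop attribute indices of 32 and above. A one-function sketch of the safe form:

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* BIT(nr) is (1UL << (nr)); BIT_ULL(nr) is (1ULL << (nr)),
     * so attr >= 32 stays valid on 32-bit builds as well.
     */
    static u64 mark_filled(u64 filled, unsigned int attr)
    {
            return filled | BIT_ULL(attr);
    }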
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 546243e11737..434b66829646 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -746,10 +746,8 @@ static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
struct htc_endpoint *ep;
struct htc_packet *packet;
u8 ep_id, *netdata;
- u32 netlen;
netdata = skb->data;
- netlen = skb->len;
htc_hdr = (struct htc_frame_hdr *) netdata;
@@ -855,12 +853,8 @@ static int htc_process_trailer(struct htc_target *target, u8 *buffer,
{
struct htc_credit_report *report;
struct htc_record_hdr *record;
- u8 *record_buf, *orig_buf;
- int orig_len, status;
-
- orig_buf = buffer;
- orig_len = len;
- status = 0;
+ u8 *record_buf;
+ int status = 0;
while (len > 0) {
if (len < sizeof(struct htc_record_hdr)) {
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 808fb30be9ad..0c61dbaa62a4 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -272,7 +272,7 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
{
struct ath6kl_dbglog_hdr debug_hdr;
struct ath6kl_dbglog_buf debug_buf;
- u32 address, length, dropped, firstbuf, debug_hdr_addr;
+ u32 address, length, firstbuf, debug_hdr_addr;
int ret, loop;
u8 *buf;
@@ -303,7 +303,6 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
address = TARG_VTOP(ar->target_type,
le32_to_cpu(debug_hdr.dbuf_addr));
firstbuf = address;
- dropped = le32_to_cpu(debug_hdr.dropped);
ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
if (ret)
goto out;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 2195b1b7a8a6..bb50680580f3 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1415,6 +1415,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x19))},
{},
};
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 618d12ed4b40..b22ed499f7ba 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1701,7 +1701,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
struct ath6kl_sta *sta;
struct aggr_info_conn *aggr_conn = NULL;
struct rxtid *rxtid;
- struct rxtid_stats *stats;
u16 hold_q_size;
u8 tid, aid;
@@ -1722,7 +1721,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
return;
rxtid = &aggr_conn->rx_tid[tid];
- stats = &aggr_conn->stat[tid];
if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 7922550c2159..ef2dd68d3f77 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -583,12 +583,14 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
case 0x5:
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
+ /* fall through */
case 0x3:
if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
break;
}
+ /* else: fall through */
case 0x1:
case 0x2:
case 0x7:
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 50fcd343c41a..fd9db8ca99d7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -676,10 +676,10 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
return 0;
ah->cal_list_curr = currCal = currCal->calNext;
- if (currCal->calState == CAL_WAITING) {
+ if (currCal->calState == CAL_WAITING)
ath9k_hw_reset_calibration(ah, currCal);
- return 0;
- }
+
+ return 0;
}
/* Do NF cal only at longer intervals */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 61a9b85045d2..713291881208 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -119,6 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
aModeRefSel = 2;
if (aModeRefSel)
break;
+ /* else: fall through */
case 1:
default:
aModeRefSel = 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index fe5102ca5010..98c5f524a360 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1800,6 +1800,8 @@ static void ar9003_hw_spectral_scan_config(struct ath_hw *ah,
static void ar9003_hw_spectral_scan_trigger(struct ath_hw *ah)
{
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ENABLE);
/* Activate spectral scan */
REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
AR_PHY_SPECTRAL_SCAN_ACTIVE);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index ef0de4f1312c..21ba20981a80 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -342,7 +342,7 @@ struct ath_chanctx {
struct ath_beacon_config beacon;
struct ath9k_hw_cal_data caldata;
- struct timespec tsf_ts;
+ struct timespec64 tsf_ts;
u64 tsf_val;
u32 last_beacon;
@@ -1021,7 +1021,7 @@ struct ath_softc {
struct ath_offchannel offchannel;
struct ath_chanctx *next_chan;
struct completion go_beacon;
- struct timespec last_event_time;
+ struct timespec64 last_event_time;
#endif
unsigned long driver_data;
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 1b05b5d7a038..fd61ae4782b6 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -233,9 +233,9 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
static u32 chanctx_event_delta(struct ath_softc *sc)
{
u64 ms;
- struct timespec ts, *old;
+ struct timespec64 ts, *old;
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
old = &sc->last_event_time;
ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
@@ -334,7 +334,7 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
{
struct ath_chanctx *prev, *cur;
- struct timespec ts;
+ struct timespec64 ts;
u32 cur_tsf, prev_tsf, beacon_int;
s32 offset;
@@ -346,7 +346,7 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
if (!prev->switch_after_beacon)
return;
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
cur_tsf = (u32) cur->tsf_val +
ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
@@ -1230,7 +1230,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_chanctx *old_ctx;
- struct timespec ts;
+ struct timespec64 ts;
bool measure_time = false;
bool send_ps = false;
bool queues_stopped = false;
@@ -1260,7 +1260,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_unlock_bh(&sc->chan_lock);
if (sc->next_chan == &sc->offchannel.chan) {
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
measure_time = true;
}
@@ -1277,7 +1277,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_lock_bh(&sc->chan_lock);
if (sc->cur_chan != &sc->offchannel.chan) {
- getrawmonotonic(&sc->cur_chan->tsf_ts);
+ ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
}
}
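The ath9k channel-context code above moves from struct timespec plus getrawmonotonic() to struct timespec64 plus ktime_get_raw_ts64(), the y2038-safe raw-monotonic interface. A small standalone sketch of measuring a millisecond delta with the 64-bit API, mirroring what chanctx_event_delta() does (helper name is hypothetical):

    #include <linux/timekeeping.h>
    #include <linux/types.h>

    /* Milliseconds elapsed since *last; also updates *last. */
    static u64 ms_since(struct timespec64 *last)
    {
            struct timespec64 now;
            u64 ms;

            ktime_get_raw_ts64(&now);
            ms  = now.tv_sec * 1000 + now.tv_nsec / 1000000;
            ms -= last->tv_sec * 1000 + last->tv_nsec / 1000000;
            *last = now;

            return ms;
    }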
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index f685843a2ff3..0a6eb8a8c1ed 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -538,7 +538,7 @@ static int read_file_interrupt(struct seq_file *file, void *data)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
PR_IS("RXLP", rxlp);
PR_IS("RXHP", rxhp);
- PR_IS("WATHDOG", bb_watchdog);
+ PR_IS("WATCHDOG", bb_watchdog);
} else {
PR_IS("RX", rxok);
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index cb0eef13af1c..fb649d85b8fc 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -138,6 +138,7 @@ static void hif_usb_mgmt_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
struct hif_device_usb *hif_dev;
+ unsigned long flags;
bool txok = true;
if (!cmd || !cmd->skb || !cmd->hif_dev)
@@ -158,14 +159,14 @@ static void hif_usb_mgmt_cb(struct urb *urb)
* If the URBs are being flushed, no need to complete
* this packet.
*/
- spin_lock(&hif_dev->tx.tx_lock);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
- spin_unlock(&hif_dev->tx.tx_lock);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
dev_kfree_skb_any(cmd->skb);
kfree(cmd);
return;
}
- spin_unlock(&hif_dev->tx.tx_lock);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
break;
default:
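The hif_usb.c, htc_drv_txrx.c and ath9k/wmi.c hunks convert plain spin_lock() calls to spin_lock_irqsave() in paths that can be reached from hard-irq context (URB completion handlers may run in interrupt context depending on the host controller), so the same lock can also be taken safely from process or softirq context. A generic sketch of the pattern with a hypothetical lock and counter:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static unsigned int example_counter;

    /* Safe to call from any context, including hard-irq handlers. */
    static void example_event(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            example_counter++;
            spin_unlock_irqrestore(&example_lock, flags);
    }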
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 585736a837ed..799010ed04e0 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1107,25 +1107,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+ unsigned long flags;
- spin_lock(&priv->rx.rxbuflock);
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
if (!tmp_buf->in_process) {
rxbuf = tmp_buf;
break;
}
}
- spin_unlock(&priv->rx.rxbuflock);
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
if (rxbuf == NULL) {
ath_dbg(common, ANY, "No free RX buffer\n");
goto err;
}
- spin_lock(&priv->rx.rxbuflock);
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
rxbuf->skb = skb;
rxbuf->in_process = true;
- spin_unlock(&priv->rx.rxbuflock);
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
tasklet_schedule(&priv->rx_tasklet);
return;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e60bea4604e4..bb319f22761f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -496,7 +496,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah)
ath_err(common, "eeprom contains invalid mac address: %pM\n",
common->macaddr);
- random_ether_addr(common->macaddr);
+ eth_random_addr(common->macaddr);
ath_err(common, "random mac address will be used: %pM\n",
common->macaddr);
@@ -1835,13 +1835,13 @@ fail:
return -EINVAL;
}
-u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur)
+u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
{
- struct timespec ts;
+ struct timespec64 ts;
s64 usec;
if (!cur) {
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
cur = &ts;
}
@@ -1859,7 +1859,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
- struct timespec tsf_ts;
+ struct timespec64 tsf_ts;
u32 tsf_offset;
u64 tsf = 0;
int r;
@@ -1905,7 +1905,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* Save TSF before chip reset, a cold reset clears it */
- getrawmonotonic(&tsf_ts);
+ ktime_get_raw_ts64(&tsf_ts);
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -2942,16 +2942,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ieee80211_channel *channel;
int chan_pwr, new_pwr;
+ u16 ctl = NO_CTL;
if (!chan)
return;
+ if (!test)
+ ctl = ath9k_regd_get_ctl(reg, chan);
+
channel = chan->chan;
chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
new_pwr = min_t(int, chan_pwr, reg->power_limit);
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(reg, chan),
+ ah->eep_ops->set_txpower(ah, chan, ctl,
get_antenna_gain(ah, chan), new_pwr, test);
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9804a24a2dc0..68956cdc8c9a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1060,7 +1060,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
-u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur);
+u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b6663c80e7dd..1049773378f2 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1865,7 +1865,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
tsf -= le64_to_cpu(avp->tsf_adjust);
- getrawmonotonic(&avp->chanctx->tsf_ts);
+ ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_settsf64(sc->sc_ah, tsf);
avp->chanctx->tsf_val = tsf;
@@ -1881,7 +1881,7 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- getrawmonotonic(&avp->chanctx->tsf_ts);
+ ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_reset_tsf(sc->sc_ah);
avp->chanctx->tsf_val = 0;
@@ -1928,6 +1928,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
flush = true;
+ /* fall through */
case IEEE80211_AMPDU_TX_STOP_CONT:
ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index b0b5579b7560..d1f6710ca63b 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -209,6 +209,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
{
struct wmi *wmi = priv;
struct wmi_cmd_hdr *hdr;
+ unsigned long flags;
u16 cmd_id;
if (unlikely(wmi->stopped))
@@ -218,20 +219,20 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
cmd_id = be16_to_cpu(hdr->command_id);
if (cmd_id & 0x1000) {
- spin_lock(&wmi->wmi_lock);
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
__skb_queue_tail(&wmi->wmi_event_queue, skb);
- spin_unlock(&wmi->wmi_lock);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
tasklet_schedule(&wmi->wmi_event_tasklet);
return;
}
/* Check if there has been a timeout. */
- spin_lock(&wmi->wmi_lock);
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
- spin_unlock(&wmi->wmi_lock);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
goto free_skb;
}
- spin_unlock(&wmi->wmi_lock);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
/* WMI command response */
ath9k_wmi_rsp_callback(wmi, skb);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 7fdb152be0bb..43b6c8508e49 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -62,7 +62,7 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- int seqno);
+ struct ath_buf *bf);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->status.status_driver_data[0];
- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_STATUS_EOSP)) {
ieee80211_tx_status(hw, skb);
return;
}
@@ -295,7 +296,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
if (fi->baw_tracked) {
- ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
+ ath_tx_update_baw(sc, tid, bf);
sendbar = true;
}
@@ -311,10 +312,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- int seqno)
+ struct ath_buf *bf)
{
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ u16 seqno = bf->bf_state.seqno;
int index, cindex;
+ if (!fi->baw_tracked)
+ return;
+
index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
@@ -335,6 +341,9 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
u16 seqno = bf->bf_state.seqno;
int index, cindex;
+ if (fi->baw_tracked)
+ return;
+
index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
__set_bit(cindex, tid->tx_buf);
@@ -611,7 +620,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
- ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_update_baw(sc, tid, bf);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -641,7 +650,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* run out of tx buf.
*/
if (!tbf) {
- ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_update_baw(sc, tid, bf);
ath_tx_complete_buf(sc, bf, txq,
&bf_head, NULL, ts,
@@ -969,7 +978,8 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_lastbf = bf;
tx_info = IEEE80211_SKB_CB(skb);
- tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ tx_info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
+ IEEE80211_TX_STATUS_EOSP);
/*
* No aggregation session is running, but there may be frames
@@ -1009,11 +1019,14 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_update_baw(sc, tid, bf);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
}
+ if (bf_isampdu(bf))
+ ath_tx_addto_baw(sc, tid, bf);
+
return bf;
}
@@ -1071,8 +1084,6 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_next = NULL;
/* link buffers of this frame to the aggregate */
- if (!fi->baw_tracked)
- ath_tx_addto_baw(sc, tid, bf);
bf->bf_state.ndelim = ndelim;
list_add_tail(&bf->list, bf_q);
@@ -1659,6 +1670,22 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
}
}
+
+static void
+ath9k_set_moredata(struct ath_softc *sc, struct ath_buf *bf, bool val)
+{
+ struct ieee80211_hdr *hdr;
+ u16 mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ u16 mask_val = mask * val;
+
+ hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
+ if ((hdr->frame_control & mask) != mask_val) {
+ hdr->frame_control = (hdr->frame_control & ~mask) | mask_val;
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ sizeof(*hdr), DMA_TO_DEVICE);
+ }
+}
+
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
@@ -1689,12 +1716,11 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
if (!bf)
break;
+ ath9k_set_moredata(sc, bf, true);
list_add_tail(&bf->list, &bf_q);
ath_set_rates(tid->an->vif, tid->an->sta, bf);
- if (bf_isampdu(bf)) {
- ath_tx_addto_baw(sc, tid, bf);
+ if (bf_isampdu(bf))
bf->bf_state.bf_type &= ~BUF_AGGR;
- }
if (bf_tail)
bf_tail->bf_next = bf;
@@ -1712,6 +1738,9 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
if (list_empty(&bf_q))
return;
+ if (!more_data)
+ ath9k_set_moredata(sc, bf_tail, false);
+
info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
info->flags |= IEEE80211_TX_STATUS_EOSP;
@@ -2407,7 +2436,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
.txq = sc->beacon.cabq
};
struct ath_tx_info info = {};
- struct ieee80211_hdr *hdr;
struct ath_buf *bf_tail = NULL;
struct ath_buf *bf;
LIST_HEAD(bf_q);
@@ -2451,15 +2479,10 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (list_empty(&bf_q))
return;
- bf = list_first_entry(&bf_q, struct ath_buf, list);
- hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
-
- if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
- hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
- sizeof(*hdr), DMA_TO_DEVICE);
- }
+ bf = list_last_entry(&bf_q, struct ath_buf, list);
+ ath9k_set_moredata(sc, bf, false);
+ bf = list_first_entry(&bf_q, struct ath_buf, list);
ath_txq_lock(sc, txctl.txq);
ath_tx_fill_desc(sc, bf, txctl.txq, 0);
ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index aeb5e6e806be..79998a3ddb7a 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -493,7 +493,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+ struct wcn36xx_sta *sta_priv = sta ? wcn36xx_sta_to_priv(sta) : NULL;
int ret = 0;
u8 key[WLAN_MAX_KEY_LEN];
@@ -512,7 +512,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
- vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
break;
case WLAN_CIPHER_SUITE_CCMP:
vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
@@ -567,15 +567,19 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_conf->keyidx,
key_conf->keylen,
key);
+
if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
(WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
- sta_priv->is_data_encrypted = true;
- wcn36xx_smd_set_stakey(wcn,
- vif_priv->encrypt_type,
- key_conf->keyidx,
- key_conf->keylen,
- key,
- get_sta_index(vif, sta_priv));
+ list_for_each_entry(sta_priv,
+ &vif_priv->sta_list, list) {
+ sta_priv->is_data_encrypted = true;
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ }
}
}
break;
@@ -984,6 +988,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
+ INIT_LIST_HEAD(&vif_priv->sta_list);
list_add(&vif_priv->list, &wcn->vif_list);
wcn36xx_smd_add_sta_self(wcn, vif);
@@ -1005,6 +1010,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
spin_lock_init(&sta_priv->ampdu_lock);
sta_priv->vif = vif_priv;
+ list_add(&sta_priv->list, &vif_priv->sta_list);
+
/*
* For STA mode HW will be configured on BSS_CHANGED_ASSOC because
* at this stage AID is not available yet.
@@ -1032,6 +1039,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
+ list_del(&sta_priv->list);
wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
sta_priv->vif = NULL;
@@ -1153,8 +1161,6 @@ static const struct ieee80211_ops wcn36xx_ops = {
static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
{
- int ret = 0;
-
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -1201,7 +1207,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
wiphy_ext_feature_set(wcn->hw->wiphy,
NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- return ret;
+ return 0;
}
static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index b4dadf75d565..00098f24116d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -250,7 +250,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
- int ret = 0;
+ int ret;
unsigned long start;
struct wcn36xx_hal_msg_header *hdr =
(struct wcn36xx_hal_msg_header *)wcn->hal_buf;
@@ -446,7 +446,7 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
int wcn36xx_smd_start(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
- int ret = 0;
+ int ret;
int i;
size_t len;
@@ -493,7 +493,7 @@ out:
int wcn36xx_smd_stop(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_stop_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
@@ -520,7 +520,7 @@ out:
int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
{
struct wcn36xx_hal_init_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
@@ -549,7 +549,7 @@ out:
int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_start_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
@@ -579,7 +579,7 @@ out:
int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_end_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
@@ -610,7 +610,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
enum wcn36xx_hal_sys_mode mode)
{
struct wcn36xx_hal_finish_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
@@ -732,7 +732,7 @@ out:
static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
- int ret = 0;
+ int ret;
ret = wcn36xx_smd_rsp_status_check(buf, len);
if (ret)
@@ -747,7 +747,7 @@ int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
struct ieee80211_vif *vif, int ch)
{
struct wcn36xx_hal_switch_channel_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
@@ -860,7 +860,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn,
u8 *channels, size_t channel_count)
{
struct wcn36xx_hal_update_scan_params_req_ex msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
@@ -931,7 +931,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_add_sta_self_req msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
@@ -965,7 +965,7 @@ out:
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
{
struct wcn36xx_hal_del_sta_self_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
@@ -993,7 +993,7 @@ out:
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_delete_sta_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
@@ -1040,7 +1040,7 @@ static int wcn36xx_smd_join_rsp(void *buf, size_t len)
int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
{
struct wcn36xx_hal_join_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
@@ -1089,7 +1089,7 @@ int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
enum wcn36xx_hal_link_state state)
{
struct wcn36xx_hal_set_link_state_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
@@ -1215,7 +1215,7 @@ int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
{
struct wcn36xx_hal_config_sta_req_msg msg;
struct wcn36xx_hal_config_sta_params *sta_params;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
@@ -1414,7 +1414,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct wcn36xx_hal_config_bss_params *bss;
struct wcn36xx_hal_config_sta_params *sta_params;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
@@ -1579,7 +1579,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
u16 p2p_off)
{
struct wcn36xx_hal_send_beacon_req_msg msg_body;
- int ret = 0, pad, pvm_len;
+ int ret, pad, pvm_len;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
@@ -1653,7 +1653,7 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
struct sk_buff *skb)
{
struct wcn36xx_hal_send_probe_resp_req_msg msg;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
@@ -1700,7 +1700,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_set_sta_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
@@ -1708,12 +1708,20 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
msg_body.set_sta_key_params.sta_index = sta_index;
msg_body.set_sta_key_params.enc_type = enc_type;
- msg_body.set_sta_key_params.key[0].id = keyidx;
- msg_body.set_sta_key_params.key[0].unicast = 1;
- msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
- msg_body.set_sta_key_params.key[0].pae_role = 0;
- msg_body.set_sta_key_params.key[0].length = keylen;
- memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ if (enc_type == WCN36XX_HAL_ED_WEP104 ||
+ enc_type == WCN36XX_HAL_ED_WEP40) {
+ /* Use bss key for wep (static) */
+ msg_body.set_sta_key_params.def_wep_idx = keyidx;
+ msg_body.set_sta_key_params.wep_type = 0;
+ } else {
+ msg_body.set_sta_key_params.key[0].id = keyidx;
+ msg_body.set_sta_key_params.key[0].unicast = 1;
+ msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+ msg_body.set_sta_key_params.key[0].pae_role = 0;
+ msg_body.set_sta_key_params.key[0].length = keylen;
+ memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ }
+
msg_body.set_sta_key_params.single_tid_rc = 1;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -1741,7 +1749,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
u8 *key)
{
struct wcn36xx_hal_set_bss_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
@@ -1778,7 +1786,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
@@ -1810,7 +1818,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
u8 keyidx)
{
struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
@@ -1839,7 +1847,7 @@ int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_enter_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
@@ -1869,7 +1877,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_exit_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
@@ -1895,7 +1903,7 @@ out:
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
@@ -1930,7 +1938,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
{
struct wcn36xx_hal_keep_alive_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
@@ -1968,7 +1976,7 @@ int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5)
{
struct wcn36xx_hal_dump_cmd_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
@@ -2013,7 +2021,6 @@ void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
- int ret = 0;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
@@ -2022,8 +2029,8 @@ int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
arr_idx = cap / 32;
bit_idx = cap % 32;
- ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
- return ret;
+
+ return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
}
void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
@@ -2043,7 +2050,7 @@ void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
- int ret = 0, i;
+ int ret, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -2079,7 +2086,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_add_ba_session_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
@@ -2117,7 +2124,7 @@ out:
int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
{
struct wcn36xx_hal_add_ba_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
@@ -2145,7 +2152,7 @@ out:
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
{
struct wcn36xx_hal_del_ba_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
@@ -2185,7 +2192,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
@@ -2364,7 +2371,7 @@ int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
{
struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
size_t len;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
@@ -2399,7 +2406,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 11e74015c79a..a58f313983b9 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -129,6 +129,8 @@ struct wcn36xx_vif {
u8 self_sta_index;
u8 self_dpu_desc_index;
u8 self_ucast_dpu_sign;
+
+ struct list_head sta_list;
};
/**
@@ -154,6 +156,7 @@ struct wcn36xx_vif {
* |______________|_____________|_______________|
*/
struct wcn36xx_sta {
+ struct list_head list;
struct wcn36xx_vif *vif;
u16 aid;
u16 tid;
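To install static WEP keys for every associated station, wcn36xx now keeps a per-vif station list (the sta_list and list members added above) and walks it with list_for_each_entry() from set_key(). A minimal sketch of that list bookkeeping, with illustrative structure and function names rather than the driver's:

    #include <linux/list.h>
    #include <linux/types.h>

    struct example_vif {
            struct list_head sta_list;
    };

    struct example_sta {
            struct list_head list;
            bool encrypted;
    };

    static void example_vif_init(struct example_vif *vif)
    {
            INIT_LIST_HEAD(&vif->sta_list);
    }

    static void example_add_sta(struct example_vif *vif, struct example_sta *sta)
    {
            list_add(&sta->list, &vif->sta_list);
    }

    /* Apply a per-vif setting to every tracked station. */
    static void example_mark_all_encrypted(struct example_vif *vif)
    {
            struct example_sta *sta;

            list_for_each_entry(sta, &vif->sta_list, list)
                    sta->encrypted = true;
    }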
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 398edd2a7f2b..d3d61ae459e2 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,6 +9,7 @@ wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
+wil6210-y += txrx_edma.o
wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += fw.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 78946f28d0c7..f79c337105cb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -302,14 +302,14 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
sinfo->generation = wil->sinfo_gen;
- sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) |
- BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_RX_BITRATE) |
- BIT(NL80211_STA_INFO_TX_BITRATE) |
- BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
@@ -322,7 +322,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
sinfo->tx_failed = stats->tx_errors;
if (test_bit(wil_vif_fwconnected, vif->status)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING,
wil->fw_capabilities))
sinfo->signal = reply.evt.rssi;
@@ -689,11 +689,12 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
- /* check we are client side */
+ /* scan is supported on client interfaces and on AP interface */
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_AP:
break;
default:
return -EOPNOTSUPP;
@@ -1089,18 +1090,51 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
int rc;
bool tx_status;
- /* Note, currently we do not support the "wait" parameter, user-space
- * must call remain_on_channel before mgmt_tx or listen on a channel
- * another way (AP/PCP or connected station)
- * in addition we need to check if specified "chan" argument is
- * different from currently "listened" channel and fail if it is.
+ wil_dbg_misc(wil, "mgmt_tx: channel %d offchan %d, wait %d\n",
+ params->chan ? params->chan->hw_value : -1,
+ params->offchan,
+ params->wait);
+
+ /* Note, currently we support the "wait" parameter only on AP mode.
+ * In other modes, user-space must call remain_on_channel before
+ * mgmt_tx or listen on a channel other than active one.
*/
- rc = wmi_mgmt_tx(vif, buf, len);
- tx_status = (rc == 0);
+ if (params->chan && params->chan->hw_value == 0) {
+ wil_err(wil, "invalid channel\n");
+ return -EINVAL;
+ }
+ if (wdev->iftype != NL80211_IFTYPE_AP) {
+ wil_dbg_misc(wil,
+ "send WMI_SW_TX_REQ_CMDID on non-AP interfaces\n");
+ rc = wmi_mgmt_tx(vif, buf, len);
+ goto out;
+ }
+
+ if (!params->chan || params->chan->hw_value == vif->channel) {
+ wil_dbg_misc(wil,
+ "send WMI_SW_TX_REQ_CMDID for on-channel\n");
+ rc = wmi_mgmt_tx(vif, buf, len);
+ goto out;
+ }
+
+ if (params->offchan == 0) {
+ wil_err(wil,
+ "invalid channel params: current %d requested %d, off-channel not allowed\n",
+ vif->channel, params->chan->hw_value);
+ return -EBUSY;
+ }
+
+ /* use the wmi_mgmt_tx_ext only on AP mode and off-channel */
+ rc = wmi_mgmt_tx_ext(vif, buf, len, params->chan->hw_value,
+ params->wait);
+
+out:
+ tx_status = (rc == 0);
cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
tx_status, GFP_KERNEL);
+
return rc;
}
@@ -1726,7 +1760,7 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int authorize;
int cid, i;
- struct vring_tx_data *txdata = NULL;
+ struct wil_ring_tx_data *txdata = NULL;
wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n",
mac, params->sta_flags_mask, params->sta_flags_set,
@@ -1746,20 +1780,20 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
return -ENOLINK;
}
- for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++)
- if (wil->vring2cid_tid[i][0] == cid) {
- txdata = &wil->vring_tx_data[i];
+ for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++)
+ if (wil->ring2cid_tid[i][0] == cid) {
+ txdata = &wil->ring_tx_data[i];
break;
}
if (!txdata) {
- wil_err(wil, "vring data not found\n");
+ wil_err(wil, "ring data not found\n");
return -ENOLINK;
}
authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
txdata->dot1x_open = authorize ? 1 : 0;
- wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i,
+ wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i,
txdata->dot1x_open);
return 0;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ebfdff4d328c..51c3330bc316 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -29,7 +29,10 @@
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
static u32 dbg_txdesc_index;
-static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
+static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */
+static u32 dbg_status_msg_index;
+/* 0..wil->num_rx_status_rings-1 for Rx, wil->tx_sring_idx for Tx */
+static u32 dbg_sring_index;
enum dbg_off_type {
doff_u32 = 0,
@@ -47,20 +50,53 @@ struct dbg_off {
enum dbg_off_type type;
};
-static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
- const char *name, struct vring *vring,
- char _s, char _h)
+static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
+ struct wil_ring *ring,
+ char _s, char _h, int idx)
{
- void __iomem *x = wmi_addr(wil, vring->hwtail);
+ u8 num_of_descs;
+ bool has_skb = false;
+
+ if (ring->is_rx) {
+ struct wil_rx_enhanced_desc *rx_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[idx].rx.enhanced;
+ u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+ has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ seq_printf(s, "%c", (has_skb) ? _h : _s);
+ } else {
+ struct wil_tx_enhanced_desc *d =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[idx].tx.enhanced;
+
+ num_of_descs = (u8)d->mac.d[2];
+ has_skb = ring->ctx[idx].skb;
+ if (num_of_descs >= 1)
+ seq_printf(s, "%c", ring->ctx[idx].skb ? _h : _s);
+ else
+ /* num_of_descs == 0, it's a frag in a list of descs */
+ seq_printf(s, "%c", has_skb ? 'h' : _s);
+ }
+}
+
+static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
+ const char *name, struct wil_ring *ring,
+ char _s, char _h)
+{
+ void __iomem *x = wmi_addr(wil, ring->hwtail);
u32 v;
- seq_printf(s, "VRING %s = {\n", name);
- seq_printf(s, " pa = %pad\n", &vring->pa);
- seq_printf(s, " va = 0x%p\n", vring->va);
- seq_printf(s, " size = %d\n", vring->size);
- seq_printf(s, " swtail = %d\n", vring->swtail);
- seq_printf(s, " swhead = %d\n", vring->swhead);
- seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
+ seq_printf(s, "RING %s = {\n", name);
+ seq_printf(s, " pa = %pad\n", &ring->pa);
+ seq_printf(s, " va = 0x%p\n", ring->va);
+ seq_printf(s, " size = %d\n", ring->size);
+ if (wil->use_enhanced_dma_hw && ring->is_rx)
+ seq_printf(s, " swtail = %u\n", *ring->edma_rx_swtail.va);
+ else
+ seq_printf(s, " swtail = %d\n", ring->swtail);
+ seq_printf(s, " swhead = %d\n", ring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail);
if (x) {
v = readl(x);
seq_printf(s, "0x%08x = %d\n", v, v);
@@ -68,41 +104,45 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "???\n");
}
- if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+ if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
- for (i = 0; i < vring->size; i++) {
- volatile struct vring_tx_desc *d = &vring->va[i].tx;
-
- if ((i % 128) == 0 && (i != 0))
+ for (i = 0; i < ring->size; i++) {
+ if ((i % 128) == 0 && i != 0)
seq_puts(s, "\n");
- seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
- _s : (vring->ctx[i].skb ? _h : 'h'));
+ if (wil->use_enhanced_dma_hw) {
+ wil_print_desc_edma(s, wil, ring, _s, _h, i);
+ } else {
+ volatile struct vring_tx_desc *d =
+ &ring->va[i].tx.legacy;
+ seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
+ _s : (ring->ctx[i].skb ? _h : 'h'));
+ }
}
seq_puts(s, "\n");
}
seq_puts(s, "}\n");
}
-static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+static int wil_ring_debugfs_show(struct seq_file *s, void *data)
{
uint i;
struct wil6210_priv *wil = s->private;
- wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
+ wil_print_ring(s, wil, "rx", &wil->ring_rx, 'S', '_');
- for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
- struct vring *vring = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+ struct wil_ring *ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
- if (vring->va) {
- int cid = wil->vring2cid_tid[i][0];
- int tid = wil->vring2cid_tid[i][1];
- u32 swhead = vring->swhead;
- u32 swtail = vring->swtail;
- int used = (vring->size + swhead - swtail)
- % vring->size;
- int avail = vring->size - used - 1;
+ if (ring->va) {
+ int cid = wil->ring2cid_tid[i][0];
+ int tid = wil->ring2cid_tid[i][1];
+ u32 swhead = ring->swhead;
+ u32 swtail = ring->swtail;
+ int used = (ring->size + swhead - swtail)
+ % ring->size;
+ int avail = ring->size - used - 1;
char name[10];
char sidle[10];
/* performance monitoring */
@@ -137,20 +177,88 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
txdata->dot1x_open ? "+" : "-",
used, avail, sidle);
- wil_print_vring(s, wil, name, vring, '_', 'H');
+ wil_print_ring(s, wil, name, ring, '_', 'H');
+ }
+ }
+
+ return 0;
+}
+
+static int wil_ring_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_ring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_ring = {
+ .open = wil_ring_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ void __iomem *x = wmi_addr(wil, sring->hwtail);
+ int sring_idx = sring - wil->srings;
+ u32 v;
+
+ seq_printf(s, "Status Ring %s [ %d ] = {\n",
+ sring->is_rx ? "RX" : "TX", sring_idx);
+ seq_printf(s, " pa = %pad\n", &sring->pa);
+ seq_printf(s, " va = 0x%pK\n", sring->va);
+ seq_printf(s, " size = %d\n", sring->size);
+ seq_printf(s, " elem_size = %zu\n", sring->elem_size);
+ seq_printf(s, " swhead = %d\n", sring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", sring->hwtail);
+ if (x) {
+ v = readl_relaxed(x);
+ seq_printf(s, "0x%08x = %d\n", v, v);
+ } else {
+ seq_puts(s, "???\n");
+ }
+ seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
+
+ if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+ uint i;
+
+ for (i = 0; i < sring->size; i++) {
+ u32 *sdword_0 =
+ (u32 *)(sring->va + (sring->elem_size * i));
+
+ if ((i % 128) == 0 && i != 0)
+ seq_puts(s, "\n");
+ if (i == sring->swhead)
+ seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+ 'X' : 'x');
+ else
+ seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+ '1' : '0');
}
+ seq_puts(s, "\n");
}
+ seq_puts(s, "}\n");
+}
+
+static int wil_srings_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int i = 0;
+
+ for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++)
+ if (wil->srings[i].va)
+ wil_print_sring(s, wil, &wil->srings[i]);
return 0;
}
-static int wil_vring_seq_open(struct inode *inode, struct file *file)
+static int wil_srings_seq_open(struct inode *inode, struct file *file)
{
- return single_open(file, wil_vring_debugfs_show, inode->i_private);
+ return single_open(file, wil_srings_debugfs_show, inode->i_private);
}
-static const struct file_operations fops_vring = {
- .open = wil_vring_seq_open,
+static const struct file_operations fops_srings = {
+ .open = wil_srings_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
@@ -162,8 +270,8 @@ static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
}
-static void wil_print_ring(struct seq_file *s, const char *prefix,
- void __iomem *off)
+static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
+ void __iomem *off)
{
struct wil6210_priv *wil = s->private;
struct wil6210_mbox_ring r;
@@ -249,9 +357,9 @@ static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
if (ret < 0)
return ret;
- wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+ wil_print_mbox_ring(s, "tx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx));
- wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+ wil_print_mbox_ring(s, "rx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx));
wil_pm_runtime_put(wil);
@@ -719,13 +827,13 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
if ((strcmp(cmd, "add") == 0) ||
(strcmp(cmd, "del_tx") == 0)) {
- struct vring_tx_data *txdata;
+ struct wil_ring_tx_data *txdata;
if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) {
wil_err(wil, "BACK: invalid ring id %d\n", p1);
return -EINVAL;
}
- txdata = &wil->vring_tx_data[p1];
+ txdata = &wil->ring_tx_data[p1];
if (strcmp(cmd, "add") == 0) {
if (rc < 3) {
wil_err(wil, "BACK: add require at least 2 params\n");
@@ -972,54 +1080,93 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- struct vring *vring;
- bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
+ struct wil_ring *ring;
+ bool tx;
+ int ring_idx = dbg_ring_index;
+ int txdesc_idx = dbg_txdesc_index;
+ volatile struct vring_tx_desc *d;
+ volatile u32 *u;
+ struct sk_buff *skb;
+
+ if (wil->use_enhanced_dma_hw) {
+ /* RX ring index == 0 */
+ if (ring_idx >= WIL6210_MAX_TX_RINGS) {
+ seq_printf(s, "invalid ring index %d\n", ring_idx);
+ return 0;
+ }
+ tx = ring_idx > 0; /* desc ring 0 is reserved for RX */
+ } else {
+ /* RX ring index == WIL6210_MAX_TX_RINGS */
+ if (ring_idx > WIL6210_MAX_TX_RINGS) {
+ seq_printf(s, "invalid ring index %d\n", ring_idx);
+ return 0;
+ }
+ tx = (ring_idx < WIL6210_MAX_TX_RINGS);
+ }
- vring = tx ? &wil->vring_tx[dbg_vring_index] : &wil->vring_rx;
+ ring = tx ? &wil->ring_tx[ring_idx] : &wil->ring_rx;
- if (!vring->va) {
+ if (!ring->va) {
if (tx)
- seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
+ seq_printf(s, "No Tx[%2d] RING\n", ring_idx);
else
- seq_puts(s, "No Rx VRING\n");
+ seq_puts(s, "No Rx RING\n");
return 0;
}
- if (dbg_txdesc_index < vring->size) {
- /* use struct vring_tx_desc for Rx as well,
- * only field used, .dma.length, is the same
- */
- volatile struct vring_tx_desc *d =
- &vring->va[dbg_txdesc_index].tx;
- volatile u32 *u = (volatile u32 *)d;
- struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
-
+ if (txdesc_idx >= ring->size) {
if (tx)
- seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
- dbg_txdesc_index);
+ seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
+ ring_idx, txdesc_idx, ring->size);
else
- seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
- seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- u[0], u[1], u[2], u[3]);
- seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- u[4], u[5], u[6], u[7]);
- seq_printf(s, " SKB = 0x%p\n", skb);
+ seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
+ txdesc_idx, ring->size);
+ return 0;
+ }
- if (skb) {
- skb_get(skb);
- wil_seq_print_skb(s, skb);
- kfree_skb(skb);
+ /* use struct vring_tx_desc for Rx as well,
+ * only field used, .dma.length, is the same
+ */
+ d = &ring->va[txdesc_idx].tx.legacy;
+ u = (volatile u32 *)d;
+ skb = NULL;
+
+ if (wil->use_enhanced_dma_hw) {
+ if (tx) {
+ skb = ring->ctx[txdesc_idx].skb;
+ } else {
+ struct wil_rx_enhanced_desc *rx_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[txdesc_idx].rx.enhanced;
+ u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+ if (!wil_val_in_range(buff_id, 0,
+ wil->rx_buff_mgmt.size)) {
+ seq_printf(s, "invalid buff_id %d\n", buff_id);
+ return 0;
+ }
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
}
- seq_puts(s, "}\n");
} else {
- if (tx)
- seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
- dbg_vring_index, dbg_txdesc_index,
- vring->size);
- else
- seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
- dbg_txdesc_index, vring->size);
+ skb = ring->ctx[txdesc_idx].skb;
}
+ if (tx)
+ seq_printf(s, "Tx[%2d][%3d] = {\n", ring_idx,
+ txdesc_idx);
+ else
+ seq_printf(s, "Rx[%3d] = {\n", txdesc_idx);
+ seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+ seq_printf(s, " SKB = 0x%p\n", skb);
+
+ if (skb) {
+ skb_get(skb);
+ wil_seq_print_skb(s, skb);
+ kfree_skb(skb);
+ }
+ seq_puts(s, "}\n");
return 0;
}
@@ -1036,6 +1183,115 @@ static const struct file_operations fops_txdesc = {
.llseek = seq_lseek,
};
+/*---------Tx/Rx status message------------*/
+static int wil_status_msg_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int sring_idx = dbg_sring_index;
+ struct wil_status_ring *sring;
+ bool tx = sring_idx == wil->tx_sring_idx ? 1 : 0;
+ u32 status_msg_idx = dbg_status_msg_index;
+ u32 *u;
+
+ if (sring_idx >= WIL6210_MAX_STATUS_RINGS) {
+ seq_printf(s, "invalid status ring index %d\n", sring_idx);
+ return 0;
+ }
+
+ sring = &wil->srings[sring_idx];
+
+ if (!sring->va) {
+ seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R');
+ return 0;
+ }
+
+ if (status_msg_idx >= sring->size) {
+ seq_printf(s, "%cxDesc index (%d) >= size (%d)\n",
+ tx ? 'T' : 'R', status_msg_idx, sring->size);
+ return 0;
+ }
+
+ u = sring->va + (sring->elem_size * status_msg_idx);
+
+ seq_printf(s, "%cx[%d][%3d] = {\n",
+ tx ? 'T' : 'R', sring_idx, status_msg_idx);
+
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ if (!tx && !wil->use_compressed_rx_status)
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+
+ seq_puts(s, "}\n");
+
+ return 0;
+}
+
+static int wil_status_msg_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_status_msg_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_status_msg = {
+ .open = wil_status_msg_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
+{
+ struct wil_rx_buff *it;
+ int i = 0;
+
+ list_for_each_entry(it, lh, list) {
+ if ((i % 16) == 0 && i != 0)
+ seq_puts(s, "\n ");
+ seq_printf(s, "[%4d] ", it->id);
+ i++;
+ }
+ seq_printf(s, "\nNumber of buffers: %u\n", i);
+
+ return i;
+}
+
+static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
+ int num_active;
+ int num_free;
+
+ seq_printf(s, " size = %zu\n", rbm->size);
+ seq_printf(s, " free_list_empty_cnt = %lu\n",
+ rbm->free_list_empty_cnt);
+
+ /* Print active list */
+ seq_puts(s, " Active list:\n");
+ num_active = wil_print_rx_buff(s, &rbm->active);
+ seq_puts(s, "\n Free list:\n");
+ num_free = wil_print_rx_buff(s, &rbm->free);
+
+ seq_printf(s, " Total number of buffers: %u\n",
+ num_active + num_free);
+
+ return 0;
+}
+
+static int wil_rx_buff_mgmt_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_rx_buff_mgmt_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_rx_buff_mgmt = {
+ .open = wil_rx_buff_mgmt_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
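[ The debugfs entries added in this file (srings, status_msg, rx_buff_mgmt, and the ones that follow) all use the same seq_file idiom: a show callback, a single_open() wrapper, and a file_operations table. A generic sketch of that idiom with hypothetical names, not taken from this patch:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_debugfs_show(struct seq_file *s, void *data)
{
	struct wil6210_priv *wil = s->private; /* handed over via i_private */

	seq_printf(s, "use_enhanced_dma_hw = %d\n", wil->use_enhanced_dma_hw);
	return 0;
}

static int example_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_debugfs_show, inode->i_private);
}

static const struct file_operations fops_example = {
	.open = example_seq_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
};

/* registration, typically done once from the debugfs init path: */
/* debugfs_create_file("example", 0444, dbg_dir, wil, &fops_example); */
]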
/*---------beamforming------------*/
static char *wil_bfstatus_str(u32 status)
{
@@ -1132,7 +1388,7 @@ static const struct file_operations fops_bf = {
};
/*---------temp------------*/
-static void print_temp(struct seq_file *s, const char *prefix, u32 t)
+static void print_temp(struct seq_file *s, const char *prefix, s32 t)
{
switch (t) {
case 0:
@@ -1140,7 +1396,8 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t)
seq_printf(s, "%s N/A\n", prefix);
break;
default:
- seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000);
+ seq_printf(s, "%s %s%d.%03d\n", prefix, (t < 0 ? "-" : ""),
+ abs(t / 1000), abs(t % 1000));
break;
}
}
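[ The u32 to s32 change above lets print_temp() report below-zero temperatures correctly: with plain integer division, "%d.%03d" mangles negative millidegree values (for example -5300 would print as "-5.-300"), which is why the sign is emitted separately and abs() is applied to both parts. A tiny illustration of the same formatting, outside the driver:

#include <linux/kernel.h>	/* abs() */
#include <linux/seq_file.h>

/* Illustration only: printing signed milli-units as X.YYY */
static void example_print_milli(struct seq_file *s, s32 t)
{
	/* plain "%d.%03d" would render -5300 as "-5.-300" */
	seq_printf(s, "%s%d.%03d\n",
		   t < 0 ? "-" : "", abs(t / 1000), abs(t % 1000));
}
]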
@@ -1148,7 +1405,7 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t)
static int wil_temp_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- u32 t_m, t_r;
+ s32 t_m, t_r;
int rc = wmi_get_temperature(wil, &t_m, &t_r);
if (rc) {
@@ -1384,6 +1641,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
int i;
u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
+ unsigned long long drop_dup_mcast = r->drop_dup_mcast;
seq_printf(s, "([%2d]) 0x%03x [", r->buf_size, r->head_seq_num);
for (i = 0; i < r->buf_size; i++) {
@@ -1393,9 +1651,9 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
}
seq_printf(s,
- "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
- r->total, drop_dup + drop_old, drop_dup, drop_old,
- r->ssn_last_drop);
+ "] total %llu drop %llu (dup %llu + old %llu + dup mcast %llu) last 0x%03x\n",
+ r->total, drop_dup + drop_old + drop_dup_mcast, drop_dup,
+ drop_old, drop_dup_mcast, r->ssn_last_drop);
}
static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
@@ -1477,6 +1735,12 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
p->stats.rx_short_frame,
p->stats.rx_large_frame,
p->stats.rx_replay);
+ seq_printf(s,
+ "mic error %lu, key error %lu, amsdu error %lu, csum error %lu\n",
+ p->stats.rx_mic_error,
+ p->stats.rx_key_error,
+ p->stats.rx_amsdu_error,
+ p->stats.rx_csum_err);
seq_puts(s, "Rx/MCS:");
for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
@@ -1538,6 +1802,343 @@ static const struct file_operations fops_mids = {
.llseek = seq_lseek,
};
+static int wil_tx_latency_debugfs_show(struct seq_file *s, void *data)
+__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
+{
+ struct wil6210_priv *wil = s->private;
+ int i, bin;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+ u8 aid = 0;
+ u8 mid;
+
+ if (!p->tx_latency_bins)
+ continue;
+
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ aid = p->aid;
+ break;
+ }
+ mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
+ seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status,
+ mid, aid);
+
+ if (p->status == wil_sta_connected) {
+ u64 num_packets = 0;
+ u64 tx_latency_avg = p->stats.tx_latency_total_us;
+
+ seq_puts(s, "Tx/Latency bin:");
+ for (bin = 0; bin < WIL_NUM_LATENCY_BINS; bin++) {
+ seq_printf(s, " %lld",
+ p->tx_latency_bins[bin]);
+ num_packets += p->tx_latency_bins[bin];
+ }
+ seq_puts(s, "\n");
+ if (!num_packets)
+ continue;
+ do_div(tx_latency_avg, num_packets);
+ seq_printf(s, "Tx/Latency min/avg/max (us): %d/%lld/%d",
+ p->stats.tx_latency_min_us,
+ tx_latency_avg,
+ p->stats.tx_latency_max_us);
+
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+
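[ The average latency computed above uses do_div() rather than a plain 64-bit division because u64 arithmetic on 32-bit kernels must go through the division helpers; do_div() divides its first argument in place and returns the remainder. A self-contained sketch of the same averaging step (hypothetical helper):

#include <asm/div64.h>
#include <linux/types.h>

/* Illustration only: average of a u64 total over a u32 count */
static u64 example_average(u64 total_us, u32 num_packets)
{
	if (!num_packets)
		return 0;
	do_div(total_us, num_packets);	/* total_us now holds the quotient */
	return total_us;
}
]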
+static int wil_tx_latency_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_tx_latency_debugfs_show,
+ inode->i_private);
+}
+
+static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int val, rc, i;
+ bool enable;
+
+ rc = kstrtoint_from_user(buf, len, 0, &val);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+ if (val == 1)
+ /* default resolution */
+ val = 500;
+ if (val && (val < 50 || val > 1000)) {
+ wil_err(wil, "Invalid resolution %d\n", val);
+ return -EINVAL;
+ }
+
+ enable = !!val;
+ if (wil->tx_latency == enable)
+ return len;
+
+ wil_info(wil, "%s TX latency measurements (resolution %dusec)\n",
+ enable ? "Enabling" : "Disabling", val);
+
+ if (enable) {
+ size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS;
+
+ wil->tx_latency_res = val;
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *sta = &wil->sta[i];
+
+ kfree(sta->tx_latency_bins);
+ sta->tx_latency_bins = kzalloc(sz, GFP_KERNEL);
+ if (!sta->tx_latency_bins)
+ return -ENOMEM;
+ sta->stats.tx_latency_min_us = U32_MAX;
+ sta->stats.tx_latency_max_us = 0;
+ sta->stats.tx_latency_total_us = 0;
+ }
+ }
+ wil->tx_latency = enable;
+
+ return len;
+}
+
+static const struct file_operations fops_tx_latency = {
+ .open = wil_tx_latency_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_tx_latency_write,
+ .llseek = seq_lseek,
+};
+
+static void wil_link_stats_print_basic(struct wil6210_vif *vif,
+ struct seq_file *s,
+ struct wmi_link_stats_basic *basic)
+{
+ char per[5] = "?";
+
+ if (basic->per_average != 0xff)
+ snprintf(per, sizeof(per), "%d%%", basic->per_average);
+
+ seq_printf(s, "CID %d {\n"
+ "\tTxMCS %d TxTpt %d\n"
+ "\tGoodput(rx:tx) %d:%d\n"
+ "\tRxBcastFrames %d\n"
+ "\tRSSI %d SQI %d SNR %d PER %s\n"
+ "\tRx RFC %d Ant num %d\n"
+ "\tSectors(rx:tx) my %d:%d peer %d:%d\n"
+ "}\n",
+ basic->cid,
+ basic->bf_mcs, le32_to_cpu(basic->tx_tpt),
+ le32_to_cpu(basic->rx_goodput),
+ le32_to_cpu(basic->tx_goodput),
+ le32_to_cpu(basic->rx_bcast_frames),
+ basic->rssi, basic->sqi, basic->snr, per,
+ basic->selected_rfc, basic->rx_effective_ant_num,
+ basic->my_rx_sector, basic->my_tx_sector,
+ basic->other_rx_sector, basic->other_tx_sector);
+}
+
+static void wil_link_stats_print_global(struct wil6210_priv *wil,
+ struct seq_file *s,
+ struct wmi_link_stats_global *global)
+{
+ seq_printf(s, "Frames(rx:tx) %d:%d\n"
+ "BA Frames(rx:tx) %d:%d\n"
+ "Beacons %d\n"
+ "Rx Errors (MIC:CRC) %d:%d\n"
+ "Tx Errors (no ack) %d\n",
+ le32_to_cpu(global->rx_frames),
+ le32_to_cpu(global->tx_frames),
+ le32_to_cpu(global->rx_ba_frames),
+ le32_to_cpu(global->tx_ba_frames),
+ le32_to_cpu(global->tx_beacons),
+ le32_to_cpu(global->rx_mic_errors),
+ le32_to_cpu(global->rx_crc_errors),
+ le32_to_cpu(global->tx_fail_no_ack));
+}
+
+static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif,
+ struct seq_file *s)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_basic *stats;
+ int i;
+
+ if (!vif->fw_stats_ready) {
+ seq_puts(s, "no statistics\n");
+ return;
+ }
+
+ seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf);
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if (wil->sta[i].status == wil_sta_unused)
+ continue;
+ if (wil->sta[i].mid != vif->mid)
+ continue;
+
+ stats = &wil->sta[i].fw_stats_basic;
+ wil_link_stats_print_basic(vif, s, stats);
+ }
+}
+
+static int wil_link_stats_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_vif *vif;
+ int i, rc;
+
+ rc = mutex_lock_interruptible(&wil->vif_mutex);
+ if (rc)
+ return rc;
+
+ /* iterate over all MIDs and show per-cid statistics. Then show the
+ * global statistics
+ */
+ for (i = 0; i < wil->max_vifs; i++) {
+ vif = wil->vifs[i];
+
+ seq_printf(s, "MID %d ", i);
+ if (!vif) {
+ seq_puts(s, "unused\n");
+ continue;
+ }
+
+ wil_link_stats_debugfs_show_vif(vif, s);
+ }
+
+ mutex_unlock(&wil->vif_mutex);
+
+ return 0;
+}
+
+static int wil_link_stats_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_link_stats_debugfs_show, inode->i_private);
+}
+
+static ssize_t wil_link_stats_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int cid, interval, rc, i;
+ struct wil6210_vif *vif;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ /* specify cid (use -1 for all cids) and snapshot interval in ms */
+ rc = sscanf(kbuf, "%d %d", &cid, &interval);
+ kfree(kbuf);
+ if (rc < 0)
+ return rc;
+ if (rc < 2 || interval < 0)
+ return -EINVAL;
+
+ wil_info(wil, "request link statistics, cid %d interval %d\n",
+ cid, interval);
+
+ rc = mutex_lock_interruptible(&wil->vif_mutex);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < wil->max_vifs; i++) {
+ vif = wil->vifs[i];
+ if (!vif)
+ continue;
+
+ rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_BASIC,
+ (cid == -1 ? 0xff : cid), interval);
+ if (rc)
+ wil_err(wil, "link statistics failed for mid %d\n", i);
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+ return len;
+}
+
+static const struct file_operations fops_link_stats = {
+ .open = wil_link_stats_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_link_stats_write,
+ .llseek = seq_lseek,
+};
+
+static int
+wil_link_stats_global_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ if (!wil->fw_stats_global.ready)
+ return 0;
+
+ seq_printf(s, "TSF %lld\n", wil->fw_stats_global.tsf);
+ wil_link_stats_print_global(wil, s, &wil->fw_stats_global.stats);
+
+ return 0;
+}
+
+static int
+wil_link_stats_global_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_link_stats_global_debugfs_show,
+ inode->i_private);
+}
+
+static ssize_t
+wil_link_stats_global_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int interval, rc;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+
+ /* specify snapshot interval in ms */
+ rc = kstrtoint_from_user(buf, len, 0, &interval);
+ if (rc || interval < 0) {
+ wil_err(wil, "Invalid argument\n");
+ return -EINVAL;
+ }
+
+ wil_info(wil, "request global link stats, interval %d\n", interval);
+
+ rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_GLOBAL, 0, interval);
+ if (rc)
+ wil_err(wil, "global link stats failed %d\n", rc);
+
+ return rc ? rc : len;
+}
+
+static const struct file_operations fops_link_stats_global = {
+ .open = wil_link_stats_global_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_link_stats_global_write,
+ .llseek = seq_lseek,
+};
+
static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1760,6 +2361,60 @@ static const struct file_operations fops_suspend_stats = {
.open = simple_open,
};
+/*---------compressed_rx_status---------*/
+static ssize_t wil_compressed_rx_status_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int compressed_rx_status;
+ int rc;
+
+ rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+
+ if (wil_has_active_ifaces(wil, true, false)) {
+ wil_err(wil, "cannot change edma config after iface is up\n");
+ return -EPERM;
+ }
+
+ wil_info(wil, "%sable compressed_rx_status\n",
+ compressed_rx_status ? "En" : "Dis");
+
+ wil->use_compressed_rx_status = compressed_rx_status;
+
+ return len;
+}
+
+static int
+wil_compressed_rx_status_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ seq_printf(s, "%d\n", wil->use_compressed_rx_status);
+
+ return 0;
+}
+
+static int
+wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_compressed_rx_status_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_compressed_rx_status = {
+ .open = wil_compressed_rx_status_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_compressed_rx_status_write,
+ .llseek = seq_lseek,
+};
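[ compressed_rx_status, tx_latency and the link-stats entries are writable; they share the usual debugfs write-handler shape: parse the user string with kstrtoint_from_user(), validate, apply, and return the consumed length. A generic sketch with a hypothetical knob (the target field here is just an example):

#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static ssize_t example_knob_write(struct file *file, const char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct wil6210_priv *wil = s->private;
	int val, rc;

	rc = kstrtoint_from_user(buf, len, 0, &val);
	if (rc)
		return rc;

	wil->amsdu_en = !!val;	/* example target; any u8/bool flag works */
	return len;
}
]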
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1790,7 +2445,7 @@ static const struct {
const struct file_operations *fops;
} dbg_files[] = {
{"mbox", 0444, &fops_mbox},
- {"vrings", 0444, &fops_vring},
+ {"rings", 0444, &fops_ring},
{"stations", 0444, &fops_sta},
{"mids", 0444, &fops_mids},
{"desc", 0444, &fops_txdesc},
@@ -1813,6 +2468,13 @@ static const struct {
{"fw_capabilities", 0444, &fops_fw_capabilities},
{"fw_version", 0444, &fops_fw_version},
{"suspend_stats", 0644, &fops_suspend_stats},
+ {"compressed_rx_status", 0644, &fops_compressed_rx_status},
+ {"srings", 0444, &fops_srings},
+ {"status_msg", 0444, &fops_status_msg},
+ {"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt},
+ {"tx_latency", 0644, &fops_tx_latency},
+ {"link_stats", 0644, &fops_link_stats},
+ {"link_stats_global", 0644, &fops_link_stats_global},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1858,7 +2520,12 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(chip_revision, 0444, doff_u8),
WIL_FIELD(abft_len, 0644, doff_u8),
WIL_FIELD(wakeup_trigger, 0644, doff_u8),
- WIL_FIELD(vring_idle_trsh, 0644, doff_u32),
+ WIL_FIELD(ring_idle_trsh, 0644, doff_u32),
+ WIL_FIELD(num_rx_status_rings, 0644, doff_u8),
+ WIL_FIELD(rx_status_ring_order, 0644, doff_u32),
+ WIL_FIELD(tx_status_ring_order, 0644, doff_u32),
+ WIL_FIELD(rx_buff_id_count, 0644, doff_u32),
+ WIL_FIELD(amsdu_en, 0644, doff_u8),
{},
};
@@ -1872,9 +2539,11 @@ static const struct dbg_off dbg_wil_regs[] = {
/* static parameters */
static const struct dbg_off dbg_statics[] = {
{"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
- {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32},
+ {"ring_index", 0644, (ulong)&dbg_ring_index, doff_u32},
{"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
{"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
+ {"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
+ {"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32},
{},
};
@@ -1922,10 +2591,14 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
+ int i;
+
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
kfree(wil->dbg_data.data_arr);
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++)
+ kfree(wil->sta[i].tx_latency_bins);
/* free pmc memory without sending command to fw, as it will
* be reset on the way down anyway
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index e7ff41e623d2..a04c87ffd37b 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -101,7 +101,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
if (ret < 0)
return ret;
- wil_configure_interrupt_moderation(wil);
+ wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);
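[ The ethtool hook now goes through wil->txrx_ops instead of calling the legacy routine directly; the same indirection appears below for irq_tx/irq_rx and the ring init/fini callbacks, so the legacy DMA and enhanced DMA (edma) paths can be selected once at init time. A schematic sketch of that pattern; the real ops structure is defined elsewhere in the driver and carries more callbacks than shown here:

#include <linux/interrupt.h>

/* Schematic only -- struct and field names are illustrative */
struct example_txrx_ops {
	void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
	irqreturn_t (*irq_tx)(int irq, void *cookie);
	irqreturn_t (*irq_rx)(int irq, void *cookie);
};

static void example_select_ops(struct wil6210_priv *wil,
			       struct example_txrx_ops *ops)
{
	if (wil->use_enhanced_dma_hw) {
		ops->configure_interrupt_moderation =
			wil_configure_interrupt_moderation_edma;
		ops->irq_tx = wil6210_irq_tx_edma;
		ops->irq_rx = wil6210_irq_rx_edma;
	} else {
		ops->configure_interrupt_moderation =
			wil_configure_interrupt_moderation;
		ops->irq_tx = wil6210_irq_tx;
		ops->irq_rx = wil6210_irq_rx;
	}
}
]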
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 540fc20984d8..3e2bbbceca06 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +23,8 @@
MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT);
MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS);
MODULE_FIRMWARE(WIL_BOARD_FILE_NAME);
+MODULE_FIRMWARE(WIL_FW_NAME_TALYN);
+MODULE_FIRMWARE(WIL_BRD_NAME_TALYN);
static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 718161b829c2..388b3d4717ca 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -145,7 +145,7 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
capabilities);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
memcpy(wil->fw_capabilities, rec->capabilities,
- min(sizeof(wil->fw_capabilities), capa_size));
+ min_t(size_t, sizeof(wil->fw_capabilities), capa_size));
wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
rec->capabilities, capa_size, false);
return 0;
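[ The min() to min_t() change above is about type checking: the kernel's min() compares its arguments with strict type checks and warns when the two sides have different integer types, while min_t() casts both operands to the named type first. For illustration, outside this patch:

#include <linux/kernel.h>	/* min, min_t */

/* Illustration only: mixed-type bound without a type-mismatch warning */
static size_t example_copy_len(size_t dst_size, u32 src_len)
{
	return min_t(size_t, dst_size, src_len);
}
]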
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 84e9840c1752..5d287a8e1b45 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -44,6 +44,8 @@
(~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_TX_EDMA BIT_TX_STATUS_IRQ
+#define WIL6210_IMC_RX_EDMA BIT_RX_STATUS_IRQ
#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
ISR_MISC_MBOX_EVT | \
ISR_MISC_FW_ERROR)
@@ -87,12 +89,24 @@ static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
WIL6210_IRQ_DISABLE);
}
+static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
{
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
+static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
@@ -125,6 +139,12 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
WIL6210_IMC_TX);
}
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_TX_EDMA);
+}
+
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0;
@@ -133,6 +153,12 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
}
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_RX_EDMA);
+}
+
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
@@ -164,7 +190,9 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
+ wil6210_mask_irq_tx_edma(wil);
wil6210_mask_irq_rx(wil);
+ wil6210_mask_irq_rx_edma(wil);
wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@@ -179,13 +207,43 @@ void wil_unmask_irq(struct wil6210_priv *wil)
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_MISC_VALUE);
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
wil6210_unmask_irq_pseudo(wil);
- wil6210_unmask_irq_tx(wil);
- wil6210_unmask_irq_rx(wil);
+ if (wil->use_enhanced_dma_hw) {
+ wil6210_unmask_irq_tx_edma(wil);
+ wil6210_unmask_irq_rx_edma(wil);
+ } else {
+ wil6210_unmask_irq_tx(wil);
+ wil6210_unmask_irq_rx(wil);
+ }
wil6210_unmask_irq_misc(wil, true);
}
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
+{
+ u32 moderation;
+
+ wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);
+
+ wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);
+
+ /* Update RX and TX moderation */
+ moderation = wil->rx_max_burst_duration |
+ (WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
+ wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
+ wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);
+
+ /* Treat special events as regular
+ * (set bit 0 to 0x1 and clear bits 1-8)
+ */
+ wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
+ wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
+}
+
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
@@ -294,6 +352,97 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
return IRQ_HANDLED;
}
+static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
+
+ trace_wil6210_irq_rx(isr);
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: RX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_rx_edma(wil);
+
+ if (likely(isr & BIT_RX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "RX status ring\n");
+ isr &= ~BIT_RX_STATUS_IRQ;
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ if (likely(test_bit(wil_status_napi_en, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_rx);
+ } else {
+ wil_err(wil,
+ "Got Rx interrupt while stopping interface\n");
+ }
+ } else {
+ wil_err(wil, "Got Rx interrupt while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ /* Rx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_rx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_rx_edma(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
+
+ trace_wil6210_irq_tx(isr);
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: TX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_tx_edma(wil);
+
+ if (likely(isr & BIT_TX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "TX status ring\n");
+ isr &= ~BIT_TX_STATUS_IRQ;
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_tx);
+ } else {
+ wil_err(wil, "Got Tx status ring IRQ while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+ /* Tx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_tx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_tx_edma(wil);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -476,6 +625,15 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
wil6210_unmask_irq_misc(wil, false);
+ /* in non-triple MSI case, this is done inside wil6210_thread_irq
+ * because it has to be done after unmasking the pseudo.
+ */
+ if (wil->n_msi == 3 && wil->suspend_resp_rcvd) {
+ wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
+ wil->suspend_resp_comp = true;
+ wake_up_interruptible(&wil->wq);
+ }
+
return IRQ_HANDLED;
}
@@ -510,30 +668,53 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
*/
static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
{
+ u32 icm_rx, icr_rx, imv_rx;
+ u32 icm_tx, icr_tx, imv_tx;
+ u32 icm_misc, icr_misc, imv_misc;
+
if (!test_bit(wil_status_irqen, wil->status)) {
- u32 icm_rx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICM));
- u32 icr_rx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
- u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+ if (wil->use_enhanced_dma_hw) {
+ icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR +
offsetof(struct RGF_ICR, IMV));
- u32 icm_tx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICM));
- u32 icr_tx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
- u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+ icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ } else {
+ icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
offsetof(struct RGF_ICR, IMV));
- u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+ icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ }
+ icm_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICM));
- u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+ icr_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
- u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+ imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
offsetof(struct RGF_ICR, IMV));
/* HALP interrupt can be unmasked when misc interrupts are
@@ -592,11 +773,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
* voting for wake thread - need at least 1 vote
*/
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
- (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+ (wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
- (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+ (wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
@@ -610,6 +791,40 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
return rc;
}
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+
+ /* IRQ's are in the following order:
+ * - Tx
+ * - Rx
+ * - Misc
+ */
+ rc = request_irq(irq, wil->txrx_ops.irq_tx, IRQF_SHARED,
+ WIL_NAME "_tx", wil);
+ if (rc)
+ return rc;
+
+ rc = request_irq(irq + 1, wil->txrx_ops.irq_rx, IRQF_SHARED,
+ WIL_NAME "_rx", wil);
+ if (rc)
+ goto free0;
+
+ rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+ wil6210_irq_misc_thread,
+ IRQF_SHARED, WIL_NAME "_misc", wil);
+ if (rc)
+ goto free1;
+
+ return 0;
+free1:
+ free_irq(irq + 1, wil);
+free0:
+ free_irq(irq, wil);
+
+ return rc;
+}
+
/* can't use wil_ioread32_and_clear because ICC value is not set yet */
static inline void wil_clear32(void __iomem *addr)
{
@@ -624,6 +839,10 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
wmb(); /* make sure write completed */
@@ -646,16 +865,28 @@ void wil6210_clear_halp(struct wil6210_priv *wil)
wil6210_unmask_halp(wil);
}
-int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
{
int rc;
- wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
+ wil_dbg_misc(wil, "init_irq: %s, n_msi=%d\n",
+ wil->n_msi ? "MSI" : "INTx", wil->n_msi);
- rc = request_threaded_irq(irq, wil6210_hardirq,
- wil6210_thread_irq,
- use_msi ? 0 : IRQF_SHARED,
- WIL_NAME, wil);
+ if (wil->use_enhanced_dma_hw) {
+ wil->txrx_ops.irq_tx = wil6210_irq_tx_edma;
+ wil->txrx_ops.irq_rx = wil6210_irq_rx_edma;
+ } else {
+ wil->txrx_ops.irq_tx = wil6210_irq_tx;
+ wil->txrx_ops.irq_rx = wil6210_irq_rx;
+ }
+
+ if (wil->n_msi == 3)
+ rc = wil6210_request_3msi(wil, irq);
+ else
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ wil->n_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
return rc;
}
@@ -665,4 +896,8 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
wil_mask_irq(wil);
free_irq(irq, wil);
+ if (wil->n_msi == 3) {
+ free_irq(irq + 1, wil);
+ free_irq(irq + 2, wil);
+ }
}
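[ With three MSI vectors the driver requests Tx, Rx and Misc interrupts on consecutive vectors, unwinds partial registrations on failure, and frees all three on teardown, as wil6210_request_3msi() and the fini path above do. A condensed sketch of that request/unwind pairing for two vectors (error handling schematic, names illustrative):

#include <linux/interrupt.h>

static int example_request_vectors(struct wil6210_priv *wil, int base_irq)
{
	int rc;

	rc = request_irq(base_irq, wil->txrx_ops.irq_tx, IRQF_SHARED,
			 "example_tx", wil);
	if (rc)
		return rc;

	rc = request_irq(base_irq + 1, wil->txrx_ops.irq_rx, IRQF_SHARED,
			 "example_rx", wil);
	if (rc)
		goto err_free_tx;

	return 0;

err_free_tx:
	free_irq(base_irq, wil);	/* undo the first registration */
	return rc;
}
]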
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index e7006c2428a0..7debed6bec06 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -21,11 +21,14 @@
#include "wil6210.h"
#include "txrx.h"
+#include "txrx_edma.h"
#include "wmi.h"
#include "boot_loader.h"
#define WAIT_FOR_HALP_VOTE_MS 100
#define WAIT_FOR_SCAN_ABORT_MS 1000
+#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1
+#define WIL_BOARD_FILE_MAX_NAMELEN 128
bool debug_fw; /* = false; */
module_param(debug_fw, bool, 0444);
@@ -110,9 +113,29 @@ MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
-#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
+enum {
+ WIL_BOOT_ERR,
+ WIL_BOOT_VANILLA,
+ WIL_BOOT_PRODUCTION,
+ WIL_BOOT_DEVELOPMENT,
+};
+
+enum {
+ WIL_SIG_STATUS_VANILLA = 0x0,
+ WIL_SIG_STATUS_DEVELOPMENT = 0x1,
+ WIL_SIG_STATUS_PRODUCTION = 0x2,
+ WIL_SIG_STATUS_CORRUPTED_PRODUCTION = 0x3,
+};
+
+#define RST_DELAY (20) /* msec, for loop in @wil_wait_device_ready */
#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
+#define PMU_READY_DELAY_MS (4) /* ms, for sleep in @wil_wait_device_ready */
+
+#define OTP_HW_DELAY (200) /* usec, loop in @wil_wait_device_ready_talyn_mb */
+/* round up to be above 2 ms total */
+#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY)
+
/*
* Due to a hardware issue,
* one has to read/write to/from NIC in 32-bit chunks;
@@ -160,6 +183,37 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
}
}
+static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
+{
+ struct wil_ring *ring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
+
+ lockdep_assert_held(&wil->mutex);
+
+ if (!ring->va)
+ return;
+
+ wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
+
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->mid = U8_MAX;
+ txdata->enabled = 0; /* no Tx can be in progress or start anew */
+ spin_unlock_bh(&txdata->lock);
+ /* napi_synchronize waits for completion of the current NAPI but will
+ * not prevent the next NAPI run.
+ * Add a memory barrier to guarantee that txdata->enabled is zeroed
+ * before napi_synchronize so that the next scheduled NAPI will not
+ * handle this vring
+ */
+ wmb();
+ /* make sure NAPI won't touch this vring */
+ if (test_bit(wil_status_napi_en, wil->status))
+ napi_synchronize(&wil->napi_tx);
+
+ wil->txrx_ops.ring_fini_tx(wil, ring);
+}
+
static void wil_disconnect_cid(struct wil6210_vif *vif, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -219,12 +273,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
/* release vrings */
- for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
- if (wil->vring2cid_tid[i][0] == cid)
- wil_vring_fini_tx(wil, i);
+ for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+ if (wil->ring2cid_tid[i][0] == cid)
+ wil_ring_fini_tx(wil, i);
}
/* statistics */
memset(&sta->stats, 0, sizeof(sta->stats));
+ sta->stats.tx_latency_min_us = U32_MAX;
}
static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
@@ -453,18 +508,19 @@ static void wil_fw_error_worker(struct work_struct *work)
mutex_unlock(&wil->mutex);
}
-static int wil_find_free_vring(struct wil6210_priv *wil)
+static int wil_find_free_ring(struct wil6210_priv *wil)
{
int i;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- if (!wil->vring_tx[i].va)
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ if (!wil->ring_tx[i].va)
return i;
}
return -EINVAL;
}
-int wil_tx_init(struct wil6210_vif *vif, int cid)
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc = -EINVAL, ringid;
@@ -473,16 +529,17 @@ int wil_tx_init(struct wil6210_vif *vif, int cid)
wil_err(wil, "No connection pending\n");
goto out;
}
- ringid = wil_find_free_vring(wil);
+ ringid = wil_find_free_ring(wil);
if (ringid < 0) {
wil_err(wil, "No free vring found\n");
goto out;
}
- wil_dbg_wmi(wil, "Configure for connection CID %d MID %d vring %d\n",
+ wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n",
cid, vif->mid, ringid);
- rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0);
+ rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order,
+ cid, 0);
if (rc)
wil_err(wil, "init TX for CID %d MID %d vring %d failed\n",
cid, vif->mid, ringid);
@@ -494,19 +551,19 @@ out:
int wil_bcast_init(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- int ri = vif->bcast_vring, rc;
+ int ri = vif->bcast_ring, rc;
- if ((ri >= 0) && wil->vring_tx[ri].va)
+ if (ri >= 0 && wil->ring_tx[ri].va)
return 0;
- ri = wil_find_free_vring(wil);
+ ri = wil_find_free_ring(wil);
if (ri < 0)
return ri;
- vif->bcast_vring = ri;
- rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order);
+ vif->bcast_ring = ri;
+ rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order);
if (rc)
- vif->bcast_vring = -1;
+ vif->bcast_ring = -1;
return rc;
}
@@ -514,13 +571,13 @@ int wil_bcast_init(struct wil6210_vif *vif)
void wil_bcast_fini(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- int ri = vif->bcast_vring;
+ int ri = vif->bcast_ring;
if (ri < 0)
return;
- vif->bcast_vring = -1;
- wil_vring_fini_tx(wil, ri);
+ vif->bcast_ring = -1;
+ wil_ring_fini_tx(wil, ri);
}
void wil_bcast_fini_all(struct wil6210_priv *wil)
@@ -548,7 +605,7 @@ int wil_priv_init(struct wil6210_priv *wil)
}
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++)
- spin_lock_init(&wil->vring_tx_data[i].lock);
+ spin_lock_init(&wil->ring_tx_data[i].lock);
mutex_init(&wil->mutex);
mutex_init(&wil->vif_mutex);
@@ -589,11 +646,30 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
WMI_WAKEUP_TRIGGER_BCAST;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
- wil->vring_idle_trsh = 16;
+ wil->ring_idle_trsh = 16;
wil->reply_mid = U8_MAX;
wil->max_vifs = 1;
+ /* edma configuration can be updated via debugfs before allocation */
+ wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
+ wil->use_compressed_rx_status = true;
+ wil->use_rx_hw_reordering = true;
+ wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+ /* Rx status ring size should be bigger than the number of RX buffers
+ * in order to prevent backpressure on the status ring, which may
+ * cause HW freeze.
+ */
+ wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+ /* Number of RX buffer IDs should be bigger than the RX descriptor
+ * ring size as in HW reorder flow, the HW can consume additional
+ * buffers before releasing the previous ones.
+ */
+ wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
+
+ wil->amsdu_en = 1;
+
return 0;
out_wmi_wq:
@@ -736,14 +812,24 @@ static void wil_bl_prepare_halt(struct wil6210_priv *wil)
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
- wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
- wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
+ if (wil->hw_version >= HW_VER_TALYN_MB) {
+ wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB,
+ BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0_TALYN_MB,
+ BIT_USER_MAC_CPU_MAN_RST);
+ } else {
+ wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
+ }
}
static inline void wil_release_cpu(struct wil6210_priv *wil)
{
/* Start CPU */
- wil_w(wil, RGF_USER_USER_CPU_0, 1);
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB, 1);
+ else
+ wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
@@ -767,11 +853,146 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
}
}
-static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+static int wil_wait_device_ready(struct wil6210_priv *wil, int no_flash)
{
int delay = 0;
u32 x, x1 = 0;
+ /* wait until device ready. */
+ if (no_flash) {
+ msleep(PMU_READY_DELAY_MS);
+
+ wil_dbg_misc(wil, "Reset completed\n");
+ } else {
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready));
+ if (x1 != x) {
+ wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
+ x1, x);
+ x1 = x;
+ }
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while (x != BL_READY);
+
+ wil_dbg_misc(wil, "Reset completed in %d ms\n",
+ delay * RST_DELAY);
+ }
+
+ return 0;
+}
+
+static int wil_wait_device_ready_talyn_mb(struct wil6210_priv *wil)
+{
+ u32 otp_hw;
+ u8 signature_status;
+ bool otp_signature_err;
+ bool hw_section_done;
+ u32 otp_qc_secured;
+ int delay = 0;
+
+ /* Wait for OTP signature test to complete */
+ usleep_range(2000, 2200);
+
+ wil->boot_config = WIL_BOOT_ERR;
+
+ /* Poll until OTP signature status is valid.
+ * In vanilla and development modes, when signature test is complete
+ * HW sets BIT_OTP_SIGNATURE_ERR_TALYN_MB.
+ * In production mode BIT_OTP_SIGNATURE_ERR_TALYN_MB remains 0, poll
+ * for signature status change to 2 or 3.
+ */
+ do {
+ otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+ signature_status = WIL_GET_BITS(otp_hw, 8, 9);
+ otp_signature_err = otp_hw & BIT_OTP_SIGNATURE_ERR_TALYN_MB;
+
+ if (otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_VANILLA) {
+ wil->boot_config = WIL_BOOT_VANILLA;
+ break;
+ }
+ if (otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_DEVELOPMENT) {
+ wil->boot_config = WIL_BOOT_DEVELOPMENT;
+ break;
+ }
+ if (!otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_PRODUCTION) {
+ wil->boot_config = WIL_BOOT_PRODUCTION;
+ break;
+ }
+ if (!otp_signature_err &&
+ signature_status ==
+ WIL_SIG_STATUS_CORRUPTED_PRODUCTION) {
+ /* Unrecognized OTP signature, possibly a corrupted
+ * production signature. Access control is applied as
+ * in production mode, therefore do not fail
+ */
+ wil->boot_config = WIL_BOOT_PRODUCTION;
+ break;
+ }
+ if (delay++ > OTP_HW_COUNT)
+ break;
+
+ usleep_range(OTP_HW_DELAY, OTP_HW_DELAY + 10);
+ } while (!otp_signature_err && signature_status == 0);
+
+ if (wil->boot_config == WIL_BOOT_ERR) {
+ wil_err(wil,
+ "invalid boot config, signature_status %d otp_signature_err %d\n",
+ signature_status, otp_signature_err);
+ return -ETIME;
+ }
+
+ wil_dbg_misc(wil,
+ "signature test done in %d usec, otp_hw 0x%x, boot_config %d\n",
+ delay * OTP_HW_DELAY, otp_hw, wil->boot_config);
+
+ if (wil->boot_config == WIL_BOOT_VANILLA)
+ /* Assuming not SPI boot (currently not supported) */
+ goto out;
+
+ hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+ delay = 0;
+
+ while (!hw_section_done) {
+ msleep(RST_DELAY);
+
+ otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+ hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "TO waiting for hw_section_done\n");
+ return -ETIME;
+ }
+ }
+
+ wil_dbg_misc(wil, "HW section done in %d ms\n", delay * RST_DELAY);
+
+ otp_qc_secured = wil_r(wil, RGF_OTP_QC_SECURED);
+ wil->secured_boot = otp_qc_secured & BIT_BOOT_FROM_ROM ? 1 : 0;
+ wil_dbg_misc(wil, "secured boot is %sabled\n",
+ wil->secured_boot ? "en" : "dis");
+
+out:
+ wil_dbg_misc(wil, "Reset completed\n");
+
+ return 0;
+}
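The boot_config selection in wil_wait_device_ready_talyn_mb() is essentially a small truth table over the OTP signature-error bit and the two-bit signature status. A compact sketch of that table follows; the enum values are illustrative stand-ins for the WIL_SIG_STATUS_* and WIL_BOOT_* constants, not their actual definitions.

#include <stdio.h>

enum sig_status { SIG_VANILLA = 0, SIG_DEVELOPMENT = 1,
                  SIG_PRODUCTION = 2, SIG_CORRUPTED_PRODUCTION = 3 };
enum boot_config { BOOT_ERR, BOOT_VANILLA, BOOT_DEVELOPMENT, BOOT_PRODUCTION };

/* err is the OTP signature-error bit, status the 2-bit signature status */
static enum boot_config decode_boot_config(int err, enum sig_status status)
{
        if (err && status == SIG_VANILLA)
                return BOOT_VANILLA;
        if (err && status == SIG_DEVELOPMENT)
                return BOOT_DEVELOPMENT;
        if (!err && status == SIG_PRODUCTION)
                return BOOT_PRODUCTION;
        /* corrupted production signature: keep production access control */
        if (!err && status == SIG_CORRUPTED_PRODUCTION)
                return BOOT_PRODUCTION;
        return BOOT_ERR;        /* anything else means the poll must keep waiting */
}

int main(void)
{
        printf("%d\n", decode_boot_config(0, SIG_PRODUCTION)); /* -> BOOT_PRODUCTION */
        return 0;
}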
+
+static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+{
+ u32 x;
+ int rc;
+
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
/* Clear MAC link up */
@@ -811,10 +1032,17 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+ if (wil->hw_version >= HW_VER_TALYN_MB) {
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x7e000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0xc00000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+ } else {
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xfe000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+ }
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
@@ -830,34 +1058,12 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
- /* wait until device ready. typical time is 20..80 msec */
- if (no_flash)
- do {
- msleep(RST_DELAY);
- x = wil_r(wil, USER_EXT_USER_PMU_3);
- if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
- x);
- return -ETIME;
- }
- } while ((x & BIT_PMU_DEVICE_RDY) == 0);
+ if (wil->hw_version == HW_VER_TALYN_MB)
+ rc = wil_wait_device_ready_talyn_mb(wil);
else
- do {
- msleep(RST_DELAY);
- x = wil_r(wil, RGF_USER_BL +
- offsetof(struct bl_dedicated_registers_v0,
- boot_loader_ready));
- if (x1 != x) {
- wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
- x1, x);
- x1 = x;
- }
- if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
- x);
- return -ETIME;
- }
- } while (x != BL_READY);
+ rc = wil_wait_device_ready(wil, no_flash);
+ if (rc)
+ return rc;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -865,7 +1071,7 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
- if (no_flash) {
+ if (wil->hw_version < HW_VER_TALYN_MB && no_flash) {
/* Reset OTP HW vectors to fit 40MHz */
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
@@ -880,7 +1086,6 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
}
- wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
}
@@ -925,6 +1130,9 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
}
+ if (test_bit(WMI_FW_CAPABILITY_TX_REQ_EXT, wil->fw_capabilities))
+ wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX;
+
if (wil->platform_ops.set_features) {
features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
wil->fw_capabilities) &&
@@ -932,8 +1140,20 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
wil->platform_capa)) ?
BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+ if (wil->n_msi == 3)
+ features |= BIT(WIL_PLATFORM_FEATURE_TRIPLE_MSI);
+
wil->platform_ops.set_features(wil->platform_handle, features);
}
+
+ if (test_bit(WMI_FW_CAPABILITY_BACK_WIN_SIZE_64,
+ wil->fw_capabilities)) {
+ wil->max_agg_wsize = WIL_MAX_AGG_WSIZE_64;
+ wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE_128;
+ } else {
+ wil->max_agg_wsize = WIL_MAX_AGG_WSIZE;
+ wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE;
+ }
}
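The capability check above selects the block-ack limits that wil_agg_size(), changed later in this patch in rx_reorder.c, clamps incoming requests against. Below is a sketch of that selection plus the clamp; every numeric value here is an assumption for illustration, not the driver's actual WIL_MAX_* constant.

#include <stdio.h>

#define MTU_MAX            2228 /* assumed MTU, not the driver's actual value */
#define MPDU_OVERHEAD        62 /* assumed per-MPDU overhead */

struct caps { int back_win_64; };       /* mirrors WMI_FW_CAPABILITY_BACK_WIN_SIZE_64 */

static unsigned short agg_wsize(const struct caps *c, unsigned short req)
{
        unsigned short max_wsize = c->back_win_64 ? 64 : 16;
        unsigned int max_ampdu = c->back_win_64 ? 128 * 1024 : 64 * 1024;
        unsigned short by_ampdu = max_ampdu / (MTU_MAX + MPDU_OVERHEAD);
        unsigned short limit = max_wsize < by_ampdu ? max_wsize : by_ampdu;

        if (!req)                       /* 0 means "suggest the maximum" */
                return limit;
        return req < limit ? req : limit;
}

int main(void)
{
        struct caps c = { .back_win_64 = 1 };

        printf("agg wsize for req=0:  %u\n", agg_wsize(&c, 0));
        printf("agg wsize for req=32: %u\n", agg_wsize(&c, 32));
        return 0;
}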
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -945,6 +1165,28 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
le32_to_cpus(&r->head);
}
+/* construct actual board file name to use */
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
+{
+ const char *board_file;
+ const char *wil_talyn_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+
+ if (wil->board_file) {
+ board_file = wil->board_file;
+ } else {
+ /* If specific FW file is used for Talyn,
+ * use specific board file
+ */
+ if (strcmp(wil->wil_fw_name, wil_talyn_fw_name) == 0)
+ board_file = WIL_BRD_NAME_TALYN;
+ else
+ board_file = WIL_BOARD_FILE_NAME;
+ }
+
+ strlcpy(buf, board_file, len);
+}
+
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
@@ -1042,8 +1284,14 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
struct net_device *ndev = wil->main_ndev;
struct wiphy *wiphy = wil_to_wiphy(wil);
u8 mac[8];
+ int mac_addr;
- wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC),
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ mac_addr = RGF_OTP_MAC_TALYN_MB;
+ else
+ mac_addr = RGF_OTP_MAC;
+
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
sizeof(mac));
if (!is_valid_ether_addr(mac)) {
wil_err(wil, "Invalid MAC %pM\n", mac);
@@ -1060,7 +1308,7 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
- ulong to = msecs_to_jiffies(1000);
+ ulong to = msecs_to_jiffies(2000);
ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
if (0 == left) {
@@ -1147,8 +1395,13 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
/* it is W1C, clear by writing back same value */
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
- /* clear PAL_UNIT_ICR (potential D0->D3 leftover) */
- wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0);
+ /* clear PAL_UNIT_ICR (potential D0->D3 leftover)
+ * In Talyn-MB the host cannot access this register due to
+ * access control, hence PAL_UNIT_ICR is cleared by the FW
+ */
+ if (wil->hw_version < HW_VER_TALYN_MB)
+ wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR),
+ 0);
if (wil->fw_calib_result > 0) {
__le32 val = cpu_to_le32(wil->fw_calib_result |
@@ -1284,7 +1537,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil_target_reset(wil, no_flash);
wil6210_clear_irq(wil);
wil_enable_irq(wil);
- wil_rx_fini(wil);
+ wil->txrx_ops.rx_fini(wil);
+ wil->txrx_ops.tx_fini(wil);
if (rc) {
if (!no_flash)
wil_bl_crash_info(wil, true);
@@ -1304,8 +1558,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
+ char board_file[WIL_BOARD_FILE_MAX_NAMELEN];
+
+ if (wil->secured_boot) {
+ wil_err(wil, "secured boot is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ board_file[0] = '\0';
+ wil_get_board_file(wil, board_file, sizeof(board_file));
wil_info(wil, "Use firmware <%s> + board <%s>\n",
- wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+ wil->wil_fw_name, board_file);
if (!no_flash)
wil_bl_prepare_halt(wil);
@@ -1317,11 +1580,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (rc)
goto out;
if (wil->brd_file_addr)
- rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+ rc = wil_request_board(wil, board_file);
else
- rc = wil_request_firmware(wil,
- WIL_BOARD_FILE_NAME,
- true);
+ rc = wil_request_firmware(wil, board_file, true);
if (rc)
goto out;
@@ -1337,7 +1598,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
clear_bit(wil_status_resetting, wil->status);
if (load_fw) {
- wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
/* we just started MAC, wait for FW ready */
@@ -1352,6 +1612,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
+ wil->txrx_ops.configure_interrupt_moderation(wil);
+
+ /* Enable OFU rdy valid bug fix, to prevent hang in oful34_rx
+ * while there is back-pressure from Host during RX
+ */
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ wil_s(wil, RGF_DMA_MISC_CTL,
+ BIT_OFUL34_RDY_VALID_BUG_FIX_EN);
+
rc = wil_restore_vifs(wil);
if (rc) {
wil_err(wil, "failed to restore vifs, rc %d\n", rc);
@@ -1406,8 +1675,12 @@ int __wil_up(struct wil6210_priv *wil)
if (rc)
return rc;
- /* Rx VRING. After MAC and beacon */
- rc = wil_rx_init(wil, 1 << rx_ring_order);
+ /* Rx RING. After MAC and beacon */
+ rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
+ if (rc)
+ return rc;
+
+ rc = wil->txrx_ops.tx_init(wil);
if (rc)
return rc;
@@ -1568,3 +1841,11 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_unlock(&wil->halp.lock);
}
+
+void wil_init_txrx_ops(struct wil6210_priv *wil)
+{
+ if (wil->use_enhanced_dma_hw)
+ wil_init_txrx_ops_edma(wil);
+ else
+ wil_init_txrx_ops_legacy_dma(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index eb6c14ed65a4..7a78a06bd356 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -120,6 +120,27 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
return done;
}
+static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_rx);
+ int quota = budget;
+ int done;
+
+ wil_rx_handle_edma(wil, &quota);
+ done = budget - quota;
+
+ if (done < budget) {
+ napi_complete_done(napi, done);
+ wil6210_unmask_irq_rx_edma(wil);
+ wil_dbg_txrx(wil, "NAPI RX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
+
+ return done;
+}
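The edma RX poll above uses the usual NAPI budget accounting: the handler consumes from a quota, done is the budget minus the leftover quota, and interrupts are re-enabled only when the budget was not fully used. A userspace sketch of that bookkeeping with stand-in names:

#include <stdio.h>

/* pretend 'pending' packets are queued; consume up to *quota of them */
static void rx_handle(int *pending, int *quota)
{
        while (*pending > 0 && *quota > 0) {
                (*pending)--;
                (*quota)--;
        }
}

/* returns how much of the budget was used; caller re-polls if all was used */
static int napi_poll(int *pending, int budget)
{
        int quota = budget;
        int done;

        rx_handle(pending, &quota);
        done = budget - quota;

        if (done < budget)
                printf("complete: re-enable RX interrupts\n");
        return done;
}

int main(void)
{
        int pending = 100, budget = 64;

        while (napi_poll(&pending, budget) == budget)
                ;       /* stay in polling mode while the budget is exhausted */
        return 0;
}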
+
static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
@@ -129,11 +150,11 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
/* always process ALL Tx complete, regardless of budget - it is fast */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- struct vring *vring = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ struct wil_ring *ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
struct wil6210_vif *vif;
- if (!vring->va || !txdata->enabled ||
+ if (!ring->va || !txdata->enabled ||
txdata->mid >= wil->max_vifs)
continue;
@@ -157,6 +178,30 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
return min(tx_done, budget);
}
+static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_tx);
+ int tx_done;
+ /* There is only one status TX ring */
+ struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+ if (!sring->va)
+ return 0;
+
+ tx_done = wil_tx_sring_handler(wil, sring);
+
+ if (tx_done < budget) {
+ napi_complete(napi);
+ wil6210_unmask_irq_tx_edma(wil);
+ wil_dbg_txrx(wil, "NAPI TX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
+
+ return min(tx_done, budget);
+}
+
static void wil_dev_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -228,7 +273,7 @@ static void wil_p2p_discovery_timer_fn(struct timer_list *t)
static void wil_vif_init(struct wil6210_vif *vif)
{
- vif->bcast_vring = -1;
+ vif->bcast_ring = -1;
mutex_init(&vif->probe_client_mutex);
@@ -418,11 +463,21 @@ int wil_if_add(struct wil6210_priv *wil)
}
init_dummy_netdev(&wil->napi_ndev);
- netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
- WIL6210_NAPI_BUDGET);
- netif_tx_napi_add(&wil->napi_ndev,
- &wil->napi_tx, wil6210_netdev_poll_tx,
- WIL6210_NAPI_BUDGET);
+ if (wil->use_enhanced_dma_hw) {
+ netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ wil6210_netdev_poll_rx_edma,
+ WIL6210_NAPI_BUDGET);
+ netif_tx_napi_add(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx_edma,
+ WIL6210_NAPI_BUDGET);
+ } else {
+ netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ wil6210_netdev_poll_rx,
+ WIL6210_NAPI_BUDGET);
+ netif_tx_napi_add(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx,
+ WIL6210_NAPI_BUDGET);
+ }
wil_update_net_queues_bh(wil, vif, NULL, true);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 19cbc6add637..89119e7facd0 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -24,11 +24,11 @@
#include <linux/rtnetlink.h>
#include <linux/pm_runtime.h>
-static bool use_msi = true;
-module_param(use_msi, bool, 0444);
-MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
+static int n_msi = 3;
+module_param(n_msi, int, 0444);
+MODULE_PARM_DESC(n_msi, " Use MSI interrupts: 0 - use INTx, 1 - single MSI, 3 - three MSIs (default)");
-static bool ftm_mode;
+bool ftm_mode;
module_param(ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
@@ -85,7 +85,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
break;
case JTAG_DEV_ID_TALYN:
- wil->hw_name = "Talyn";
+ wil->hw_name = "Talyn-MA";
wil->hw_version = HW_VER_TALYN;
memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
@@ -93,6 +93,25 @@ int wil_set_capabilities(struct wil6210_priv *wil)
if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) &
BIT_NO_FLASH_INDICATION)
set_bit(hw_capa_no_flash, wil->hw_capa);
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
+ break;
+ case JTAG_DEV_ID_TALYN_MB:
+ wil->hw_name = "Talyn-MB";
+ wil->hw_version = HW_VER_TALYN_MB;
+ memcpy(fw_mapping, talyn_mb_fw_mapping,
+ sizeof(talyn_mb_fw_mapping));
+ wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+ set_bit(hw_capa_no_flash, wil->hw_capa);
+ wil->use_enhanced_dma_hw = true;
+ wil->use_rx_hw_reordering = true;
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
@@ -102,6 +121,8 @@ int wil_set_capabilities(struct wil6210_priv *wil)
return -EINVAL;
}
+ wil_init_txrx_ops(wil);
+
iccm_section = wil_find_fw_mapping("fw_code");
if (!iccm_section) {
wil_err(wil, "fw_code section not found in fw_mapping\n");
@@ -129,12 +150,24 @@ int wil_set_capabilities(struct wil6210_priv *wil)
void wil_disable_irq(struct wil6210_priv *wil)
{
- disable_irq(wil->pdev->irq);
+ int irq = wil->pdev->irq;
+
+ disable_irq(irq);
+ if (wil->n_msi == 3) {
+ disable_irq(irq + 1);
+ disable_irq(irq + 2);
+ }
}
void wil_enable_irq(struct wil6210_priv *wil)
{
- enable_irq(wil->pdev->irq);
+ int irq = wil->pdev->irq;
+
+ enable_irq(irq);
+ if (wil->n_msi == 3) {
+ enable_irq(irq + 1);
+ enable_irq(irq + 2);
+ }
}
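With three MSI vectors the IRQ numbers are consecutive starting at pdev->irq, which is why the code above masks irq, irq + 1 and irq + 2. A sketch of that pattern follows; mask_irq() and unmask_irq() are stand-ins for disable_irq()/enable_irq(), and the base IRQ number is arbitrary.

#include <stdio.h>

/* stand-ins for disable_irq()/enable_irq() */
static void mask_irq(int irq)   { printf("mask irq %d\n", irq); }
static void unmask_irq(int irq) { printf("unmask irq %d\n", irq); }

/* multi-MSI vectors are numbered consecutively from the base IRQ */
static void set_irqs(int base_irq, int n_msi, int enable)
{
        int n = (n_msi == 3) ? 3 : 1;   /* INTx and single MSI use one line */
        int i;

        for (i = 0; i < n; i++) {
                if (enable)
                        unmask_irq(base_irq + i);
                else
                        mask_irq(base_irq + i);
        }
}

int main(void)
{
        set_irqs(40, 3, 0);     /* disable the tx, rx and misc vectors */
        set_irqs(40, 3, 1);     /* re-enable them */
        return 0;
}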
static void wil_remove_all_additional_vifs(struct wil6210_priv *wil)
@@ -161,28 +194,47 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
* and only MSI should be used
*/
int msi_only = pdev->msi_enabled;
- bool _use_msi = use_msi;
wil_dbg_misc(wil, "if_pcie_enable\n");
pci_set_master(pdev);
- wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
+ /* how many MSI interrupts to request? */
+ switch (n_msi) {
+ case 3:
+ case 1:
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", n_msi);
+ break;
+ case 0:
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+ break;
+ default:
+ wil_err(wil, "Invalid n_msi=%d, default to 1\n", n_msi);
+ n_msi = 1;
+ }
+
+ if (n_msi == 3 &&
+ pci_alloc_irq_vectors(pdev, n_msi, n_msi, PCI_IRQ_MSI) < n_msi) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ n_msi = 1;
+ }
- if (use_msi && pci_enable_msi(pdev)) {
+ if (n_msi == 1 && pci_enable_msi(pdev)) {
wil_err(wil, "pci_enable_msi failed, use INTx\n");
- _use_msi = false;
+ n_msi = 0;
}
- if (!_use_msi && msi_only) {
+ wil->n_msi = n_msi;
+
+ if (wil->n_msi == 0 && msi_only) {
wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
rc = -ENODEV;
goto stop_master;
}
- rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
+ rc = wil6210_init_irq(wil, pdev->irq);
if (rc)
- goto stop_master;
+ goto release_vectors;
/* need reset here to obtain MAC */
mutex_lock(&wil->mutex);
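The interrupt setup above falls back in order: three MSI vectors, then a single MSI, then INTx, failing only when the INTx pin is not routed. A sketch of that cascade; alloc_three_vectors() and enable_single_msi() are stand-ins for the PCI helpers used above, and here the three-vector allocation is simply pretended to fail.

#include <stdio.h>

/* stand-ins: return 0 on success, negative on failure */
static int alloc_three_vectors(void) { return -1; }    /* pretend 3-MSI fails */
static int enable_single_msi(void)   { return 0; }

/* returns the number of MSI vectors in use: 3, 1 or 0 (INTx), or -1 on error */
static int setup_interrupts(int requested, int intx_routed)
{
        int n_msi = requested;

        if (n_msi == 3 && alloc_three_vectors() < 0) {
                printf("3 MSI mode failed, try 1 MSI\n");
                n_msi = 1;
        }
        if (n_msi == 1 && enable_single_msi() < 0) {
                printf("MSI failed, use INTx\n");
                n_msi = 0;
        }
        if (n_msi == 0 && !intx_routed)
                return -1;      /* nothing left to fall back to */
        return n_msi;
}

int main(void)
{
        printf("using %d MSI vector(s)\n", setup_interrupts(3, 1));
        return 0;
}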
@@ -195,8 +247,9 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
release_irq:
wil6210_fini_irq(wil, pdev->irq);
- /* safe to call if no MSI */
- pci_disable_msi(pdev);
+ release_vectors:
+ /* safe to call if no allocation */
+ pci_free_irq_vectors(pdev);
stop_master:
pci_clear_master(pdev);
return rc;
@@ -257,8 +310,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
- int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
- int i;
+ int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
+ int i, start_idx;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
@@ -293,24 +346,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto if_free;
}
/* rollback to err_plat */
-
- /* device supports >32bit addresses */
- for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
- rc = dma_set_mask_and_coherent(dev,
- DMA_BIT_MASK(dma_addr_size[i]));
- if (rc) {
- dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
- dma_addr_size[i], rc);
- continue;
- }
- dev_info(dev, "using dma mask %d", dma_addr_size[i]);
- wil->dma_addr_size = dma_addr_size[i];
- break;
- }
-
- if (wil->dma_addr_size == 0)
- goto err_plat;
-
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
@@ -350,6 +385,28 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
goto err_iounmap;
}
+
+ /* The device supports >32 bit DMA addresses.
+ * For legacy DMA, start the search from 48 bit.
+ */
+ start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
+
+ for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
+ rc = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(dma_addr_size[i]));
+ if (rc) {
+ dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+ dma_addr_size[i], rc);
+ continue;
+ }
+ dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+ wil->dma_addr_size = dma_addr_size[i];
+ break;
+ }
+
+ if (wil->dma_addr_size == 0)
+ goto err_iounmap;
+
wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
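The relocated DMA-mask loop above probes masks in descending order, starting from 64 bit for enhanced-DMA hardware and from 48 bit otherwise, and keeps the first one the platform accepts. A userspace sketch of that negotiation; try_set_mask() is a stand-in for dma_set_mask_and_coherent(), and its behavior here is invented for the example.

#include <stdio.h>

/* stand-in for dma_set_mask_and_coherent(); pretend only <=48 bit works */
static int try_set_mask(int bits)
{
        return bits <= 48 ? 0 : -5;
}

static int pick_dma_mask(int enhanced_dma)
{
        static const int sizes[] = { 64, 48, 40, 32 };  /* keep descending order */
        int start = enhanced_dma ? 0 : 1;               /* legacy starts at 48 */
        int i;

        for (i = start; i < (int)(sizeof(sizes) / sizeof(sizes[0])); i++)
                if (try_set_mask(sizes[i]) == 0)
                        return sizes[i];
        return 0;       /* no usable mask */
}

int main(void)
{
        printf("enhanced: %d bits, legacy: %d bits\n",
               pick_dma_mask(1), pick_dma_mask(0));
        return 0;
}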
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index ba81fb3ac96f..3a4194779ddf 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -211,7 +211,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
goto reject_suspend;
}
- if (!wil_is_rx_idle(wil)) {
+ if (!wil->txrx_ops.is_rx_idle(wil)) {
wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
@@ -235,9 +235,9 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
start = jiffies;
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
- while (!wil_is_rx_idle(wil)) {
+ while (!wil->txrx_ops.is_rx_idle(wil)) {
if (time_after(jiffies, data_comp_to)) {
- if (wil_is_rx_idle(wil))
+ if (wil->txrx_ops.is_rx_idle(wil))
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 76f8084c1fd8..b608aa16b4f1 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -95,17 +95,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
struct wil6210_vif *vif;
struct net_device *ndev;
- struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- int tid = wil_rxdesc_tid(d);
- int cid = wil_rxdesc_cid(d);
- int mid = wil_rxdesc_mid(d);
- u16 seq = wil_rxdesc_seq(d);
- int mcast = wil_rxdesc_mcast(d);
- struct wil_sta_info *sta = &wil->sta[cid];
+ int tid, cid, mid, mcast, retry;
+ u16 seq;
+ struct wil_sta_info *sta;
struct wil_tid_ampdu_rx *r;
u16 hseq;
int index;
+ wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
+ &mcast, &retry);
+ sta = &wil->sta[cid];
+
wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
mid, cid, tid, seq, mcast);
@@ -117,11 +117,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
}
ndev = vif_to_ndev(vif);
- if (unlikely(mcast)) {
- wil_netif_rx_any(skb, ndev);
- return;
- }
-
spin_lock(&sta->tid_rx_lock);
r = sta->tid_rx[tid];
@@ -130,6 +125,19 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
goto out;
}
+ if (unlikely(mcast)) {
+ if (retry && seq == r->mcast_last_seq) {
+ r->drop_dup_mcast++;
+ wil_dbg_txrx(wil, "Rx drop: dup mcast seq 0x%03x\n",
+ seq);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+ r->mcast_last_seq = seq;
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
r->total++;
hseq = r->head_seq_num;
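The new multicast branch above drops a retried frame whose sequence number matches the last one delivered, and mcast_last_seq is initialized to U16_MAX so the first frame always passes. A standalone sketch of that filter, with a reduced context struct that only carries the fields the check needs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct reorder_ctx {
        uint16_t mcast_last_seq;        /* start at UINT16_MAX: "none seen yet" */
        unsigned long drop_dup_mcast;
};

/* returns true if the frame should be delivered, false if it is a duplicate */
static bool accept_mcast(struct reorder_ctx *r, uint16_t seq, bool retry)
{
        if (retry && seq == r->mcast_last_seq) {
                r->drop_dup_mcast++;
                return false;
        }
        r->mcast_last_seq = seq;
        return true;
}

int main(void)
{
        struct reorder_ctx r = { .mcast_last_seq = UINT16_MAX };

        printf("%d %d %d\n",
               accept_mcast(&r, 0x10, false),   /* first copy: deliver */
               accept_mcast(&r, 0x10, true),    /* retried duplicate: drop */
               accept_mcast(&r, 0x11, true));   /* new seq: deliver */
        return 0;
}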
@@ -262,6 +270,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
r->buf_size = size;
r->stored_mpdu_num = 0;
r->first_time = true;
+ r->mcast_last_seq = U16_MAX;
return r;
}
@@ -288,7 +297,7 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
/* ADDBA processing */
static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
{
- u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
+ u16 max_agg_size = min_t(u16, wil->max_agg_wsize, wil->max_ampdu_size /
(mtu_max + WIL_MAX_MPDU_OVERHEAD));
if (!req_agg_wsize)
@@ -315,7 +324,10 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
* bits 6..15: buffer size
*/
u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
- bool agg_amsdu = !!(param_set & BIT(0));
+ bool agg_amsdu = wil->use_enhanced_dma_hw &&
+ wil->use_rx_hw_reordering &&
+ test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+ wil->amsdu_en && (param_set & BIT(0));
int ba_policy = param_set & BIT(1);
u16 status = WLAN_STATUS_SUCCESS;
u16 ssn = seq_ctrl >> 4;
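A sketch of decoding the ADDBA request fields referenced above: bit 0 of param_set carries the A-MSDU flag, bit 1 the BA policy, bits 6..15 the buffer size, and the starting sequence number is bits 4..15 of seq_ctrl. The struct and helper names are illustrative only, not driver symbols.

#include <stdint.h>
#include <stdio.h>

struct addba_req {
        int amsdu;              /* bit 0 of param_set */
        int ba_policy;          /* bit 1 of param_set */
        uint16_t buf_size;      /* bits 6..15 of param_set */
        uint16_t ssn;           /* bits 4..15 of seq_ctrl */
};

static struct addba_req parse_addba(uint16_t param_set, uint16_t seq_ctrl)
{
        struct addba_req req = {
                .amsdu     = param_set & 0x0001,
                .ba_policy = !!(param_set & 0x0002),
                .buf_size  = (param_set >> 6) & 0x03ff,
                .ssn       = seq_ctrl >> 4,
        };

        return req;
}

int main(void)
{
        struct addba_req req = parse_addba(0x1003, 0x0120);

        printf("amsdu %d policy %d buf_size %u ssn 0x%03x\n",
               req.amsdu, req.ba_policy, req.buf_size, req.ssn);
        return 0;
}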
@@ -352,16 +364,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (status == WLAN_STATUS_SUCCESS) {
if (req_agg_wsize == 0) {
wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
- WIL_MAX_AGG_WSIZE);
- agg_wsize = WIL_MAX_AGG_WSIZE;
+ wil->max_agg_wsize);
+ agg_wsize = wil->max_agg_wsize;
} else {
agg_wsize = min_t(u16,
- WIL_MAX_AGG_WSIZE, req_agg_wsize);
+ wil->max_agg_wsize, req_agg_wsize);
}
}
- rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status,
- agg_amsdu, agg_wsize, agg_timeout);
+ rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
+ status, agg_amsdu, agg_wsize,
+ agg_timeout);
if (rc || (status != WLAN_STATUS_SUCCESS)) {
wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
status);
@@ -384,7 +397,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
u8 agg_wsize = wil_agg_size(wil, wsize);
u16 agg_timeout = 0;
- struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int rc = 0;
if (txdata->addba_in_progress) {
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index c4db2a9d9f7f..853abc3a73e4 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -187,6 +187,40 @@ TRACE_EVENT(wil6210_rx,
__entry->seq, __entry->type, __entry->subtype)
);
+TRACE_EVENT(wil6210_rx_status,
+ TP_PROTO(struct wil6210_priv *wil, u8 use_compressed, u16 buff_id,
+ void *msg),
+ TP_ARGS(wil, use_compressed, buff_id, msg),
+ TP_STRUCT__entry(__field(u8, use_compressed)
+ __field(u16, buff_id)
+ __field(unsigned int, len)
+ __field(u8, mid)
+ __field(u8, cid)
+ __field(u8, tid)
+ __field(u8, type)
+ __field(u8, subtype)
+ __field(u16, seq)
+ __field(u8, mcs)
+ ),
+ TP_fast_assign(__entry->use_compressed = use_compressed;
+ __entry->buff_id = buff_id;
+ __entry->len = wil_rx_status_get_length(msg);
+ __entry->mid = wil_rx_status_get_mid(msg);
+ __entry->cid = wil_rx_status_get_cid(msg);
+ __entry->tid = wil_rx_status_get_tid(msg);
+ __entry->type = wil_rx_status_get_frame_type(wil,
+ msg);
+ __entry->subtype = wil_rx_status_get_fc1(wil, msg);
+ __entry->seq = wil_rx_status_get_seq(wil, msg);
+ __entry->mcs = wil_rx_status_get_mcs(msg);
+ ),
+ TP_printk(
+ "compressed %d buff_id %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x type 0x%1x subtype 0x%1x",
+ __entry->use_compressed, __entry->buff_id, __entry->len,
+ __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
+ __entry->seq, __entry->type, __entry->subtype)
+);
+
TRACE_EVENT(wil6210_tx,
TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
TP_ARGS(vring, index, len, frags),
@@ -226,6 +260,31 @@ TRACE_EVENT(wil6210_tx_done,
__entry->err)
);
+TRACE_EVENT(wil6210_tx_status,
+ TP_PROTO(struct wil_ring_tx_status *msg, u16 index,
+ unsigned int len),
+ TP_ARGS(msg, index, len),
+ TP_STRUCT__entry(__field(u16, index)
+ __field(unsigned int, len)
+ __field(u8, num_descs)
+ __field(u8, ring_id)
+ __field(u8, status)
+ __field(u8, mcs)
+
+ ),
+ TP_fast_assign(__entry->index = index;
+ __entry->len = len;
+ __entry->num_descs = msg->num_descriptors;
+ __entry->ring_id = msg->ring_id;
+ __entry->status = msg->status;
+ __entry->mcs = wil_tx_status_get_mcs(msg);
+ ),
+ TP_printk(
+ "ring_id %d swtail 0x%x len %d num_descs %d status 0x%x mcs %d",
+ __entry->ring_id, __entry->index, __entry->len,
+ __entry->num_descs, __entry->status, __entry->mcs)
+);
+
#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/
#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index b9a9fa828961..6a7943e487fb 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -28,6 +28,7 @@
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
+#include "txrx_edma.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, 0444);
@@ -47,62 +48,28 @@ static inline uint wil_rx_snaplen(void)
return rx_align_2 ? 6 : 0;
}
-static inline int wil_vring_is_empty(struct vring *vring)
+/* wil_ring_wmark_low - low watermark for available descriptor space */
+static inline int wil_ring_wmark_low(struct wil_ring *ring)
{
- return vring->swhead == vring->swtail;
+ return ring->size / 8;
}
-static inline u32 wil_vring_next_tail(struct vring *vring)
+/* wil_ring_wmark_high - high watermark for available descriptor space */
+static inline int wil_ring_wmark_high(struct wil_ring *ring)
{
- return (vring->swtail + 1) % vring->size;
-}
-
-static inline void wil_vring_advance_head(struct vring *vring, int n)
-{
- vring->swhead = (vring->swhead + n) % vring->size;
-}
-
-static inline int wil_vring_is_full(struct vring *vring)
-{
- return wil_vring_next_tail(vring) == vring->swhead;
-}
-
-/* Used space in Tx Vring */
-static inline int wil_vring_used_tx(struct vring *vring)
-{
- u32 swhead = vring->swhead;
- u32 swtail = vring->swtail;
- return (vring->size + swhead - swtail) % vring->size;
-}
-
-/* Available space in Tx Vring */
-static inline int wil_vring_avail_tx(struct vring *vring)
-{
- return vring->size - wil_vring_used_tx(vring) - 1;
-}
-
-/* wil_vring_wmark_low - low watermark for available descriptor space */
-static inline int wil_vring_wmark_low(struct vring *vring)
-{
- return vring->size/8;
-}
-
-/* wil_vring_wmark_high - high watermark for available descriptor space */
-static inline int wil_vring_wmark_high(struct vring *vring)
-{
- return vring->size/4;
+ return ring->size / 4;
}
/* returns true if num avail descriptors is lower than wmark_low */
-static inline int wil_vring_avail_low(struct vring *vring)
+static inline int wil_ring_avail_low(struct wil_ring *ring)
{
- return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
+ return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
}
/* returns true if num avail descriptors is higher than wmark_high */
-static inline int wil_vring_avail_high(struct vring *vring)
+static inline int wil_ring_avail_high(struct wil_ring *ring)
{
- return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
+ return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
}
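The watermark helpers above build on the ring fill-level arithmetic that this patch moves to a shared header: used is (size + head - tail) modulo size, avail keeps one slot unused, and the size/8 and size/4 watermarks give stop/wake hysteresis for the Tx queues. A userspace sketch of that arithmetic with a reduced ring struct:

#include <stdio.h>

struct ring { unsigned int size, swhead, swtail; };

static unsigned int ring_used(const struct ring *r)
{
        return (r->size + r->swhead - r->swtail) % r->size;
}

static unsigned int ring_avail(const struct ring *r)
{
        return r->size - ring_used(r) - 1;      /* one slot is kept unused */
}

static int avail_low(const struct ring *r)  { return ring_avail(r) < r->size / 8; }
static int avail_high(const struct ring *r) { return ring_avail(r) > r->size / 4; }

int main(void)
{
        struct ring r = { .size = 128, .swhead = 120, .swtail = 10 };

        printf("used %u avail %u low %d high %d\n",
               ring_used(&r), ring_avail(&r), avail_low(&r), avail_high(&r));
        return 0;
}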
/* returns true when all tx vrings are empty */
@@ -112,9 +79,10 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
unsigned long data_comp_to;
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- struct vring *vring = &wil->vring_tx[i];
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ struct wil_ring *vring = &wil->ring_tx[i];
+ int vring_index = vring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata =
+ &wil->ring_tx_data[vring_index];
spin_lock(&txdata->lock);
@@ -126,7 +94,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
data_comp_to = jiffies + msecs_to_jiffies(
WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
- while (!wil_vring_is_empty(vring)) {
+ while (!wil_ring_is_empty(vring)) {
if (time_after(jiffies, data_comp_to)) {
wil_dbg_pm(wil,
"TO waiting for idle tx\n");
@@ -150,13 +118,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
return true;
}
-/* wil_val_in_range - check if value in [min,max) */
-static inline bool wil_val_in_range(int val, int min, int max)
-{
- return val >= min && val < max;
-}
-
-static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = vring->size * sizeof(vring->va[0]);
@@ -205,7 +167,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
* we can use any
*/
for (i = 0; i < vring->size; i++) {
- volatile struct vring_tx_desc *_d = &vring->va[i].tx;
+ volatile struct vring_tx_desc *_d =
+ &vring->va[i].tx.legacy;
_d->dma.status = TX_DMA_STATUS_DU;
}
@@ -216,9 +179,10 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
return 0;
}
-static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
struct wil_ctx *ctx)
{
+ struct vring_tx_desc *d = &desc->legacy;
dma_addr_t pa = wil_desc_addr(&d->dma.addr);
u16 dmalen = le16_to_cpu(d->dma.length);
@@ -234,15 +198,14 @@ static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
}
}
-static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
- int tx)
+static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = vring->size * sizeof(vring->va[0]);
lockdep_assert_held(&wil->mutex);
- if (tx) {
- int vring_index = vring - wil->vring_tx;
+ if (!vring->is_rx) {
+ int vring_index = vring - wil->ring_tx;
wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
vring_index, vring->size, vring->va,
@@ -253,33 +216,33 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
&vring->pa, vring->ctx);
}
- while (!wil_vring_is_empty(vring)) {
+ while (!wil_ring_is_empty(vring)) {
dma_addr_t pa;
u16 dmalen;
struct wil_ctx *ctx;
- if (tx) {
+ if (!vring->is_rx) {
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d =
- &vring->va[vring->swtail].tx;
+ &vring->va[vring->swtail].tx.legacy;
ctx = &vring->ctx[vring->swtail];
if (!ctx) {
wil_dbg_txrx(wil,
"ctx(%d) was already completed\n",
vring->swtail);
- vring->swtail = wil_vring_next_tail(vring);
+ vring->swtail = wil_ring_next_tail(vring);
continue;
}
*d = *_d;
- wil_txdesc_unmap(dev, d, ctx);
+ wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
- vring->swtail = wil_vring_next_tail(vring);
+ vring->swtail = wil_ring_next_tail(vring);
} else { /* rx */
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d =
- &vring->va[vring->swhead].rx;
+ &vring->va[vring->swhead].rx.legacy;
ctx = &vring->ctx[vring->swhead];
*d = *_d;
@@ -287,7 +250,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
dmalen = le16_to_cpu(d->dma.length);
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
kfree_skb(ctx->skb);
- wil_vring_advance_head(vring, 1);
+ wil_ring_advance_head(vring, 1);
}
}
dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
@@ -302,13 +265,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
*
* Safe to call from IRQ
*/
-static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
struct vring_rx_desc dd, *d = &dd;
- volatile struct vring_rx_desc *_d = &vring->va[i].rx;
+ volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
dma_addr_t pa;
struct sk_buff *skb = dev_alloc_skb(sz + headroom);
@@ -318,6 +281,12 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
skb_reserve(skb, headroom);
skb_put(skb, sz);
+ /*
+ * Make sure that the network stack calculates checksum for packets
+ * which failed the HW checksum calculation
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
kfree_skb(skb);
@@ -445,19 +414,12 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
}
}
-/* similar to ieee80211_ version, but FC contain only 1-st byte */
-static inline int wil_is_back_req(u8 fc)
-{
- return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
- (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
-}
-
-bool wil_is_rx_idle(struct wil6210_priv *wil)
+static bool wil_is_rx_idle(struct wil6210_priv *wil)
{
struct vring_rx_desc *_d;
- struct vring *vring = &wil->vring_rx;
+ struct wil_ring *ring = &wil->ring_rx;
- _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
+ _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
if (_d->dma.status & RX_DMA_STATUS_DU)
return false;
@@ -472,7 +434,7 @@ bool wil_is_rx_idle(struct wil6210_priv *wil)
* Safe to call from IRQ
*/
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
- struct vring *vring)
+ struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
struct wil6210_vif *vif;
@@ -492,11 +454,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
again:
- if (unlikely(wil_vring_is_empty(vring)))
+ if (unlikely(wil_ring_is_empty(vring)))
return NULL;
i = (int)vring->swhead;
- _d = &vring->va[i].rx;
+ _d = &vring->va[i].rx.legacy;
if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
/* it is not error, we just reached end of Rx done area */
return NULL;
@@ -504,7 +466,7 @@ again:
skb = vring->ctx[i].skb;
vring->ctx[i].skb = NULL;
- wil_vring_advance_head(vring, 1);
+ wil_ring_advance_head(vring, 1);
if (!skb) {
wil_err(wil, "No Rx skb at [%d]\n", i);
goto again;
@@ -613,6 +575,8 @@ again:
* mis-calculates TCP checksum - if it should be 0x0,
* it writes 0xffff in violation of RFC 1624
*/
+ else
+ stats->rx_csum_err++;
}
if (snaplen) {
@@ -641,15 +605,15 @@ again:
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
struct net_device *ndev = wil->main_ndev;
- struct vring *v = &wil->vring_rx;
+ struct wil_ring *v = &wil->ring_rx;
u32 next_tail;
int rc = 0;
int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
WIL6210_RTAP_SIZE : 0;
- for (; next_tail = wil_vring_next_tail(v),
- (next_tail != v->swhead) && (count-- > 0);
- v->swtail = next_tail) {
+ for (; next_tail = wil_ring_next_tail(v),
+ (next_tail != v->swhead) && (count-- > 0);
+ v->swtail = next_tail) {
rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
if (unlikely(rc)) {
wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
@@ -677,7 +641,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
* Cut'n'paste from original memcmp (see lib/string.c)
* with minimal modifications
*/
-static int reverse_memcmp(const void *cs, const void *ct, size_t count)
+int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
@@ -722,6 +686,30 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
return 0;
}
+static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_net_stats *stats)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
+ (d->dma.error & RX_DMA_ERROR_MIC)) {
+ stats->rx_mic_error++;
+ wil_dbg_txrx(wil, "MIC error, dropping packet\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
+ int *security)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ *cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+ *security = wil_rxdesc_security(d);
+}
+
/*
* Pass Rx packet to the netif. Update statistics.
* Called in softirq context (NAPI poll).
@@ -733,15 +721,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct wireless_dev *wdev = vif_to_wdev(vif);
unsigned int len = skb->len;
- struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
- int security = wil_rxdesc_security(d);
+ int cid;
+ int security;
struct ethhdr *eth = (void *)skb->data;
/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
* is not suitable, need to look at data
*/
int mcast = is_multicast_ether_addr(eth->h_dest);
- struct wil_net_stats *stats = &wil->sta[cid].stats;
+ struct wil_net_stats *stats;
struct sk_buff *xmit_skb = NULL;
static const char * const gro_res_str[] = {
[GRO_MERGED] = "GRO_MERGED",
@@ -751,6 +738,10 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
[GRO_DROP] = "GRO_DROP",
};
+ wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
+
+ stats = &wil->sta[cid].stats;
+
if (ndev->features & NETIF_F_RXHASH)
/* fake L4 to ensure it won't be re-calculated later
* set hash to any non-zero value to activate rps
@@ -761,13 +752,19 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
skb_orphan(skb);
- if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
+ if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
rc = GRO_DROP;
dev_kfree_skb(skb);
stats->rx_replay++;
goto stats;
}
+ /* check errors reported by HW and update statistics */
+ if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
if (mcast) {
/* send multicast frames both to higher layers in
@@ -835,7 +832,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
- struct vring *v = &wil->vring_rx;
+ struct wil_ring *v = &wil->ring_rx;
struct sk_buff *skb;
if (unlikely(!v->va)) {
@@ -875,9 +872,9 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil)
}
}
-int wil_rx_init(struct wil6210_priv *wil, u16 size)
+static int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
- struct vring *vring = &wil->vring_rx;
+ struct wil_ring *vring = &wil->ring_rx;
int rc;
wil_dbg_misc(wil, "rx_init\n");
@@ -890,6 +887,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
wil_rx_buf_len_init(wil);
vring->size = size;
+ vring->is_rx = true;
rc = wil_vring_alloc(wil, vring);
if (rc)
return rc;
@@ -904,22 +902,46 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
return 0;
err_free:
- wil_vring_free(wil, vring, 0);
+ wil_vring_free(wil, vring);
return rc;
}
-void wil_rx_fini(struct wil6210_priv *wil)
+static void wil_rx_fini(struct wil6210_priv *wil)
{
- struct vring *vring = &wil->vring_rx;
+ struct wil_ring *vring = &wil->ring_rx;
wil_dbg_misc(wil, "rx_fini\n");
if (vring->va)
- wil_vring_free(wil, vring, 0);
+ wil_vring_free(wil, vring);
+}
+
+static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
+ u32 len, int vring_index)
+{
+ struct vring_tx_desc *d = &desc->legacy;
+
+ wil_desc_addr_set(&d->dma.addr, pa);
+ d->dma.ip_length = 0;
+ /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+ d->dma.b11 = 0/*14 | BIT(7)*/;
+ d->dma.error = 0;
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = cpu_to_le16((u16)len);
+ d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+ d->mac.d[0] = 0;
+ d->mac.d[1] = 0;
+ d->mac.d[2] = 0;
+ d->mac.ucode_cmd = 0;
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
}
-static inline void wil_tx_data_init(struct vring_tx_data *txdata)
+void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = 0;
@@ -935,8 +957,8 @@ static inline void wil_tx_data_init(struct vring_tx_data *txdata)
spin_unlock_bh(&txdata->lock);
}
-int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
- int cid, int tid)
+static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
+ int cid, int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
@@ -966,8 +988,8 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
} __packed reply = {
.cmd = {.status = WMI_FW_STATUS_FAILURE},
};
- struct vring *vring = &wil->vring_tx[id];
- struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+ struct wil_ring *vring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -980,13 +1002,14 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
}
wil_tx_data_init(txdata);
+ vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
- wil->vring2cid_tid[id][0] = cid;
- wil->vring2cid_tid[id][1] = tid;
+ wil->ring2cid_tid[id][0] = cid;
+ wil->ring2cid_tid[id][1] = tid;
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
@@ -1019,9 +1042,9 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
- wil_vring_free(wil, vring, 1);
- wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
- wil->vring2cid_tid[id][1] = 0;
+ wil_vring_free(wil, vring);
+ wil->ring2cid_tid[id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[id][1] = 0;
out:
@@ -1050,8 +1073,8 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
} __packed reply = {
.cmd = {.status = WMI_FW_STATUS_FAILURE},
};
- struct vring *vring = &wil->vring_tx[id];
- struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+ struct wil_ring *vring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -1064,13 +1087,14 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
}
wil_tx_data_init(txdata);
+ vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
- wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
- wil->vring2cid_tid[id][1] = 0; /* TID */
+ wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
+ wil->ring2cid_tid[id][1] = 0; /* TID */
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
@@ -1101,62 +1125,32 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
txdata->enabled = 0;
txdata->dot1x_open = false;
spin_unlock_bh(&txdata->lock);
- wil_vring_free(wil, vring, 1);
+ wil_vring_free(wil, vring);
out:
return rc;
}
-void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
-{
- struct vring *vring = &wil->vring_tx[id];
- struct vring_tx_data *txdata = &wil->vring_tx_data[id];
-
- lockdep_assert_held(&wil->mutex);
-
- if (!vring->va)
- return;
-
- wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
-
- spin_lock_bh(&txdata->lock);
- txdata->dot1x_open = false;
- txdata->mid = U8_MAX;
- txdata->enabled = 0; /* no Tx can be in progress or start anew */
- spin_unlock_bh(&txdata->lock);
- /* napi_synchronize waits for completion of the current NAPI but will
- * not prevent the next NAPI run.
- * Add a memory barrier to guarantee that txdata->enabled is zeroed
- * before napi_synchronize so that the next scheduled NAPI will not
- * handle this vring
- */
- wmb();
- /* make sure NAPI won't touch this vring */
- if (test_bit(wil_status_napi_en, wil->status))
- napi_synchronize(&wil->napi_tx);
-
- wil_vring_free(wil, vring, 1);
-}
-
-static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
int i;
struct ethhdr *eth = (void *)skb->data;
int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
if (cid < 0)
return NULL;
/* TODO: fix for multiple TID */
- for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
- if (wil->vring2cid_tid[i][0] == cid) {
- struct vring *v = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ if (wil->ring2cid_tid[i][0] == cid) {
+ struct wil_ring *v = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
eth->h_dest, i);
@@ -1174,42 +1168,43 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
return NULL;
}
-static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb);
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb);
-static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
- struct vring *v;
+ struct wil_ring *ring;
int i;
u8 cid;
- struct vring_tx_data *txdata;
+ struct wil_ring_tx_data *txdata;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
/* In STA mode, we expect to have only one VRING,
* for the AP we are connected to.
* Find the first ring eligible for this skb and use it.
*/
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- v = &wil->vring_tx[i];
- txdata = &wil->vring_tx_data[i];
- if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ ring = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
+ if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
continue;
- cid = wil->vring2cid_tid[i][0];
+ cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
- return v;
+ return ring;
}
- wil_dbg_txrx(wil, "Tx while no vrings active?\n");
+ wil_dbg_txrx(wil, "Tx while no rings active?\n");
return NULL;
}
@@ -1225,22 +1220,22 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
* Use old strategy when new is not supported yet:
* - for PBSS
*/
-static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
- struct vring *v;
- struct vring_tx_data *txdata;
- int i = vif->bcast_vring;
+ struct wil_ring *v;
+ struct wil_ring_tx_data *txdata;
+ int i = vif->bcast_ring;
if (i < 0)
return NULL;
- v = &wil->vring_tx[i];
- txdata = &wil->vring_tx_data[i];
+ v = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
if (!v->va || !txdata->enabled)
return NULL;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
return NULL;
return v;
@@ -1250,35 +1245,36 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
struct ethhdr *eth = (void *)skb->data;
- int cid = wil->vring2cid_tid[vring_index][0];
+ int cid = wil->ring2cid_tid[vring_index][0];
ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}
-static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
- struct vring *v, *v2;
+ struct wil_ring *v, *v2;
struct sk_buff *skb2;
int i;
u8 cid;
struct ethhdr *eth = (void *)skb->data;
char *src = eth->h_source;
- struct vring_tx_data *txdata, *txdata2;
+ struct wil_ring_tx_data *txdata, *txdata2;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
/* find 1-st vring eligible for data */
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- v = &wil->vring_tx[i];
- txdata = &wil->vring_tx_data[i];
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ v = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
continue;
- cid = wil->vring2cid_tid[i][0];
+ cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
/* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -1298,15 +1294,15 @@ found:
/* find other active vrings and duplicate skb for each */
for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
- v2 = &wil->vring_tx[i];
- txdata2 = &wil->vring_tx_data[i];
+ v2 = &wil->ring_tx[i];
+ txdata2 = &wil->ring_tx_data[i];
if (!v2->va || txdata2->mid != vif->mid)
continue;
- cid = wil->vring2cid_tid[i][0];
+ cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1316,7 +1312,7 @@ found:
if (skb2) {
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
wil_set_da_for_vring(wil, skb2, i);
- wil_tx_vring(wil, vif, v2, skb2);
+ wil_tx_ring(wil, vif, v2, skb2);
} else {
wil_err(wil, "skb_copy failed\n");
}
@@ -1325,28 +1321,6 @@ found:
return v;
}
-static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
- int vring_index)
-{
- wil_desc_addr_set(&d->dma.addr, pa);
- d->dma.ip_length = 0;
- /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
- d->dma.b11 = 0/*14 | BIT(7)*/;
- d->dma.error = 0;
- d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
- d->dma.length = cpu_to_le16((u16)len);
- d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
- d->mac.d[0] = 0;
- d->mac.d[1] = 0;
- d->mac.d[2] = 0;
- d->mac.ucode_cmd = 0;
- /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
- d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
- (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
-
- return 0;
-}
-
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
@@ -1454,7 +1428,7 @@ static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
}
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb)
+ struct wil_ring *vring, struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
@@ -1474,13 +1448,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
int sg_desc_cnt = 0; /* number of descriptors for current mss*/
u32 swhead = vring->swhead;
- int used, avail = wil_vring_avail_tx(vring);
+ int used, avail = wil_ring_avail_tx(vring);
int nr_frags = skb_shinfo(skb)->nr_frags;
int min_desc_required = nr_frags + 1;
int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
int f, len, hdrlen, headlen;
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int vring_index = vring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
uint i = swhead;
dma_addr_t pa;
const skb_frag_t *frag = NULL;
@@ -1548,7 +1522,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
tcp_hdr_len = tcp_hdrlen(skb);
skb_net_hdr_len = skb_network_header_len(skb);
- _hdr_desc = &vring->va[i].tx;
+ _hdr_desc = &vring->va[i].tx.legacy;
pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
@@ -1556,7 +1530,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
goto err_exit;
}
- wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
+ hdrlen, vring_index);
wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
tcp_hdr_len, skb_net_hdr_len);
wil_tx_last_desc(hdr_desc);
@@ -1613,7 +1588,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
goto mem_error;
}
- _desc = &vring->va[i].tx;
+ _desc = &vring->va[i].tx.legacy;
if (!_first_desc) {
_first_desc = _desc;
@@ -1623,7 +1598,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
d = &desc_mem;
}
- wil_tx_desc_map(d, pa, lenmss, vring_index);
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+ pa, lenmss, vring_index);
wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
is_ipv4, tcp_hdr_len,
skb_net_hdr_len);
@@ -1701,8 +1677,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
vring->ctx[i].skb = skb_get(skb);
/* performance monitoring */
- used = wil_vring_used_tx(vring);
- if (wil_val_in_range(wil->vring_idle_trsh,
+ used = wil_ring_used_tx(vring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
used, used + descs_used)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
@@ -1717,7 +1693,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
wmb();
/* advance swhead */
- wil_vring_advance_head(vring, descs_used);
+ wil_ring_advance_head(vring, descs_used);
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
/* make sure all writes to descriptors (shared memory) are done before
@@ -1725,6 +1701,11 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
*/
wmb();
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
wil_w(wil, vring->hwtail, vring->swhead);
return 0;
@@ -1733,12 +1714,12 @@ mem_error:
struct wil_ctx *ctx;
i = (swhead + descs_used - 1) % vring->size;
- d = (struct vring_tx_desc *)&vring->va[i].tx;
- _desc = &vring->va[i].tx;
+ d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
+ _desc = &vring->va[i].tx.legacy;
*d = *_desc;
_desc->dma.status = TX_DMA_STATUS_DU;
ctx = &vring->ctx[i];
- wil_txdesc_unmap(dev, d, ctx);
+ wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
memset(ctx, 0, sizeof(*ctx));
descs_used--;
}
@@ -1746,26 +1727,26 @@ err_exit:
return rc;
}
-static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb)
+static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d;
- u32 swhead = vring->swhead;
- int avail = wil_vring_avail_tx(vring);
+ u32 swhead = ring->swhead;
+ int avail = wil_ring_avail_tx(ring);
int nr_frags = skb_shinfo(skb)->nr_frags;
uint f = 0;
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
uint i = swhead;
dma_addr_t pa;
int used;
- bool mcast = (vring_index == vif->bcast_vring);
+ bool mcast = (ring_index == vif->bcast_ring);
uint len = skb_headlen(skb);
- wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
- vring_index);
+ wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
+ skb->len, ring_index, nr_frags);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1773,23 +1754,24 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
if (unlikely(avail < 1 + nr_frags)) {
wil_err_ratelimited(wil,
"Tx ring[%2d] full. No space for %d fragments\n",
- vring_index, 1 + nr_frags);
+ ring_index, 1 + nr_frags);
return -ENOMEM;
}
- _d = &vring->va[i].tx;
+ _d = &ring->va[i].tx.legacy;
pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
- wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
+ wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
skb_headlen(skb), skb->data, &pa);
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
if (unlikely(dma_mapping_error(dev, pa)))
return -EINVAL;
- vring->ctx[i].mapped_as = wil_mapped_as_single;
+ ring->ctx[i].mapped_as = wil_mapped_as_single;
/* 1-st segment */
- wil_tx_desc_map(d, pa, len, vring_index);
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
+ ring_index);
if (unlikely(mcast)) {
d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
@@ -1798,11 +1780,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
- vring_index);
+ ring_index);
goto dma_error;
}
- vring->ctx[i].nr_frags = nr_frags;
+ ring->ctx[i].nr_frags = nr_frags;
wil_tx_desc_set_nr_frags(d, nr_frags + 1);
/* middle segments */
@@ -1812,20 +1794,21 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
int len = skb_frag_size(frag);
*_d = *d;
- wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- i = (swhead + f + 1) % vring->size;
- _d = &vring->va[i].tx;
+ i = (swhead + f + 1) % ring->size;
+ _d = &ring->va[i].tx.legacy;
pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "Tx[%2d] failed to map fragment\n",
- vring_index);
+ ring_index);
goto dma_error;
}
- vring->ctx[i].mapped_as = wil_mapped_as_page;
- wil_tx_desc_map(d, pa, len, vring_index);
+ ring->ctx[i].mapped_as = wil_mapped_as_page;
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+ pa, len, ring_index);
/* no need to check return code -
* if it succeeded for 1-st descriptor,
* it will succeed here too
@@ -1837,7 +1820,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
*_d = *d;
- wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
@@ -1845,15 +1828,15 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
* to prevent skb release before accounting
* in case of immediate "tx done"
*/
- vring->ctx[i].skb = skb_get(skb);
+ ring->ctx[i].skb = skb_get(skb);
/* performance monitoring */
- used = wil_vring_used_tx(vring);
- if (wil_val_in_range(wil->vring_idle_trsh,
+ used = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
used, used + nr_frags + 1)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
- vring_index, used, used + nr_frags + 1);
+ ring_index, used, used + nr_frags + 1);
}
/* Make sure to advance the head only after descriptor update is done.
@@ -1864,17 +1847,22 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
wmb();
/* advance swhead */
- wil_vring_advance_head(vring, nr_frags + 1);
- wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
- vring->swhead);
- trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
+ wil_ring_advance_head(ring, nr_frags + 1);
+ wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
+ ring->swhead);
+ trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
- wil_w(wil, vring->hwtail, vring->swhead);
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
+ wil_w(wil, ring->hwtail, ring->swhead);
return 0;
dma_error:
@@ -1883,12 +1871,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
for (f = 0; f < nr_frags; f++) {
struct wil_ctx *ctx;
- i = (swhead + f) % vring->size;
- ctx = &vring->ctx[i];
- _d = &vring->va[i].tx;
+ i = (swhead + f) % ring->size;
+ ctx = &ring->ctx[i];
+ _d = &ring->va[i].tx.legacy;
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
- wil_txdesc_unmap(dev, d, ctx);
+ wil->txrx_ops.tx_desc_unmap(dev,
+ (union wil_tx_desc *)d,
+ ctx);
memset(ctx, 0, sizeof(*ctx));
}
@@ -1896,11 +1886,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
return -EINVAL;
}
-static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb)
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb)
{
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
int rc;
spin_lock(&txdata->lock);
@@ -1914,8 +1904,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
return -EINVAL;
}
- rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
- (wil, vif, vring, skb);
+ rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
+ (wil, vif, ring, skb);
spin_unlock(&txdata->lock);
@@ -1941,7 +1931,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
*/
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
struct wil6210_vif *vif,
- struct vring *vring,
+ struct wil_ring *ring,
bool check_stop)
{
int i;
@@ -1949,9 +1939,9 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
if (unlikely(!vif))
return;
- if (vring)
+ if (ring)
wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
- (int)(vring - wil->vring_tx), vif->mid, check_stop,
+ (int)(ring - wil->ring_tx), vif->mid, check_stop,
vif->net_queue_stopped);
else
wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
@@ -1962,7 +1952,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
return;
if (check_stop) {
- if (!vring || unlikely(wil_vring_avail_low(vring))) {
+ if (!ring || unlikely(wil_ring_avail_low(ring))) {
/* not enough room in the vring */
netif_tx_stop_all_queues(vif_to_ndev(vif));
vif->net_queue_stopped = true;
@@ -1978,22 +1968,22 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
/* check wake */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- struct vring *cur_vring = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ struct wil_ring *cur_ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
- if (txdata->mid != vif->mid || !cur_vring->va ||
- !txdata->enabled || cur_vring == vring)
+ if (txdata->mid != vif->mid || !cur_ring->va ||
+ !txdata->enabled || cur_ring == ring)
continue;
- if (wil_vring_avail_low(cur_vring)) {
- wil_dbg_txrx(wil, "vring %d full, can't wake\n",
- (int)(cur_vring - wil->vring_tx));
+ if (wil_ring_avail_low(cur_ring)) {
+ wil_dbg_txrx(wil, "ring %d full, can't wake\n",
+ (int)(cur_ring - wil->ring_tx));
return;
}
}
- if (!vring || wil_vring_avail_high(vring)) {
- /* enough room in the vring */
+ if (!ring || wil_ring_avail_high(ring)) {
+ /* enough room in the ring */
wil_dbg_txrx(wil, "calling netif_tx_wake\n");
netif_tx_wake_all_queues(vif_to_ndev(vif));
vif->net_queue_stopped = false;
@@ -2001,18 +1991,18 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
}
void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool check_stop)
+ struct wil_ring *ring, bool check_stop)
{
spin_lock(&wil->net_queue_lock);
- __wil_update_net_queues(wil, vif, vring, check_stop);
+ __wil_update_net_queues(wil, vif, ring, check_stop);
spin_unlock(&wil->net_queue_lock);
}
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool check_stop)
+ struct wil_ring *ring, bool check_stop)
{
spin_lock_bh(&wil->net_queue_lock);
- __wil_update_net_queues(wil, vif, vring, check_stop);
+ __wil_update_net_queues(wil, vif, ring, check_stop);
spin_unlock_bh(&wil->net_queue_lock);
}
@@ -2022,7 +2012,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct wil6210_priv *wil = vif_to_wil(vif);
struct ethhdr *eth = (void *)skb->data;
bool bcast = is_multicast_ether_addr(eth->h_dest);
- struct vring *vring;
+ struct wil_ring *ring;
static bool pr_once_fw;
int rc;
@@ -2048,36 +2038,36 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* find vring */
if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
/* in STA mode (ESS), all to same VRING (to AP) */
- vring = wil_find_tx_vring_sta(wil, vif, skb);
+ ring = wil_find_tx_ring_sta(wil, vif, skb);
} else if (bcast) {
if (vif->pbss)
/* in pbss, no bcast VRING - duplicate skb in
* all stations VRINGs
*/
- vring = wil_find_tx_bcast_2(wil, vif, skb);
+ ring = wil_find_tx_bcast_2(wil, vif, skb);
else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
/* AP has a dedicated bcast VRING */
- vring = wil_find_tx_bcast_1(wil, vif, skb);
+ ring = wil_find_tx_bcast_1(wil, vif, skb);
else
/* unexpected combination, fallback to duplicating
* the skb in all stations VRINGs
*/
- vring = wil_find_tx_bcast_2(wil, vif, skb);
+ ring = wil_find_tx_bcast_2(wil, vif, skb);
} else {
/* unicast, find specific VRING by dest. address */
- vring = wil_find_tx_ucast(wil, vif, skb);
+ ring = wil_find_tx_ucast(wil, vif, skb);
}
- if (unlikely(!vring)) {
- wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
+ if (unlikely(!ring)) {
+ wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
goto drop;
}
/* set up vring entry */
- rc = wil_tx_vring(wil, vif, vring, skb);
+ rc = wil_tx_ring(wil, vif, ring, skb);
switch (rc) {
case 0:
/* shall we stop net queues? */
- wil_update_net_queues_bh(wil, vif, vring, true);
+ wil_update_net_queues_bh(wil, vif, ring, true);
/* statistics will be updated on the tx_complete */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2093,20 +2083,29 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NET_XMIT_DROP;
}
-static inline bool wil_need_txstat(struct sk_buff *skb)
+void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_sta_info *sta)
{
- struct ethhdr *eth = (void *)skb->data;
+ int skb_time_us;
+ int bin;
- return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
- (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
-}
+ if (!wil->tx_latency)
+ return;
-static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
-{
- if (unlikely(wil_need_txstat(skb)))
- skb_complete_wifi_ack(skb, acked);
- else
- acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+ if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
+ return;
+
+ skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
+ bin = skb_time_us / wil->tx_latency_res;
+ bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);
+
+ wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
+ sta->tx_latency_bins[bin]++;
+ sta->stats.tx_latency_total_us += skb_time_us;
+ if (skb_time_us < sta->stats.tx_latency_min_us)
+ sta->stats.tx_latency_min_us = skb_time_us;
+ if (skb_time_us > sta->stats.tx_latency_max_us)
+ sta->stats.tx_latency_max_us = skb_time_us;
}
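
The function above stamps each skb with a transmit timestamp (kept in skb->cb) and, on completion, converts the elapsed time into a histogram bin of width wil->tx_latency_res microseconds, clamping overflows into the last bin. A minimal stand-alone sketch of that binning arithmetic follows; the bin count is made up for the example and is not the driver's WIL_NUM_LATENCY_BINS.

#include <stdio.h>

#define NUM_LATENCY_BINS 24	/* assumed bin count, for the sketch only */

/* Map a measured latency (microseconds) to a histogram bin of width
 * res_us, clamping overflows into the last bin - the same arithmetic
 * as wil_tx_latency_calc() above.
 */
static int latency_bin(int skb_time_us, int res_us)
{
	int bin = skb_time_us / res_us;

	return bin < NUM_LATENCY_BINS - 1 ? bin : NUM_LATENCY_BINS - 1;
}

int main(void)
{
	printf("7300 us  -> bin %d\n", latency_bin(7300, 1000));	/* 7 */
	printf("99000 us -> bin %d\n", latency_bin(99000, 1000));	/* 23 */
	return 0;
}
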
/**
@@ -2121,10 +2120,10 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct device *dev = wil_to_dev(wil);
- struct vring *vring = &wil->vring_tx[ringid];
- struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+ struct wil_ring *vring = &wil->ring_tx[ringid];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int done = 0;
- int cid = wil->vring2cid_tid[ringid][0];
+ int cid = wil->ring2cid_tid[ringid][0];
struct wil_net_stats *stats = NULL;
volatile struct vring_tx_desc *_d;
int used_before_complete;
@@ -2142,12 +2141,12 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
- used_before_complete = wil_vring_used_tx(vring);
+ used_before_complete = wil_ring_used_tx(vring);
if (cid < WIL6210_MAX_CID)
stats = &wil->sta[cid].stats;
- while (!wil_vring_is_empty(vring)) {
+ while (!wil_ring_is_empty(vring)) {
int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
/**
@@ -2158,7 +2157,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
int lf = (vring->swtail + ctx->nr_frags) % vring->size;
/* TODO: check we are not past head */
- _d = &vring->va[lf].tx;
+ _d = &vring->va[lf].tx.legacy;
if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
break;
@@ -2170,7 +2169,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
ctx = &vring->ctx[vring->swtail];
skb = ctx->skb;
- _d = &vring->va[vring->swtail].tx;
+ _d = &vring->va[vring->swtail].tx.legacy;
*d = *_d;
@@ -2184,7 +2183,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- wil_txdesc_unmap(dev, d, ctx);
+ wil->txrx_ops.tx_desc_unmap(dev,
+ (union wil_tx_desc *)d,
+ ctx);
if (skb) {
if (likely(d->dma.error == 0)) {
@@ -2193,6 +2194,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
if (stats) {
stats->tx_packets++;
stats->tx_bytes += skb->len;
+
+ wil_tx_latency_calc(wil, skb,
+ &wil->sta[cid]);
}
} else {
ndev->stats.tx_errors++;
@@ -2203,7 +2207,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
}
memset(ctx, 0, sizeof(*ctx));
/* Make sure the ctx is zeroed before updating the tail
- * to prevent a case where wil_tx_vring will see
+ * to prevent a case where wil_tx_ring will see
* this descriptor as used and handle it before ctx zero
* is completed.
*/
@@ -2213,14 +2217,14 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
* so hardware will not try to process this desc.,
* - rest of descriptor will be initialized on Tx.
*/
- vring->swtail = wil_vring_next_tail(vring);
+ vring->swtail = wil_ring_next_tail(vring);
done++;
}
}
/* performance monitoring */
- used_new = wil_vring_used_tx(vring);
- if (wil_val_in_range(wil->vring_idle_trsh,
+ used_new = wil_ring_used_tx(vring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
used_new, used_before_complete)) {
wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
ringid, used_before_complete, used_new);
@@ -2233,3 +2237,49 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
return done;
}
+
+static inline int wil_tx_init(struct wil6210_priv *wil)
+{
+ return 0;
+}
+
+static inline void wil_tx_fini(struct wil6210_priv *wil) {}
+
+static void wil_get_reorder_params(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid, int *cid,
+ int *mid, u16 *seq, int *mcast, int *retry)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ *tid = wil_rxdesc_tid(d);
+ *cid = wil_rxdesc_cid(d);
+ *mid = wil_rxdesc_mid(d);
+ *seq = wil_rxdesc_seq(d);
+ *mcast = wil_rxdesc_mcast(d);
+ *retry = wil_rxdesc_retry(d);
+}
+
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
+{
+ wil->txrx_ops.configure_interrupt_moderation =
+ wil_configure_interrupt_moderation;
+ /* TX ops */
+ wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
+ wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
+ wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
+ wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
+ wil->txrx_ops.ring_fini_tx = wil_vring_free;
+ wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
+ wil->txrx_ops.tx_init = wil_tx_init;
+ wil->txrx_ops.tx_fini = wil_tx_fini;
+ /* RX ops */
+ wil->txrx_ops.rx_init = wil_rx_init;
+ wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
+ wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
+ wil->txrx_ops.get_netif_rx_params =
+ wil_get_netif_rx_params;
+ wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
+ wil->txrx_ops.rx_error_check = wil_rx_error_check;
+ wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
+ wil->txrx_ops.rx_fini = wil_rx_fini;
+}
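
wil_init_txrx_ops_legacy_dma() fills a per-device table of function pointers so the data path can call wil->txrx_ops.tx_desc_map() and friends without branching on legacy versus enhanced DMA for every packet. Below is a reduced, user-space sketch of that dispatch pattern; the struct and function names are invented for illustration and are not the driver's.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical, cut-down ops table - it mirrors only the dispatch idea. */
struct txrx_ops {
	int (*tx_desc_map)(void *desc, unsigned long pa, unsigned int len,
			   int ring_index);
};

static int tx_desc_map_legacy(void *desc, unsigned long pa,
			      unsigned int len, int ring_index)
{
	printf("legacy map: ring %d, %u bytes\n", ring_index, len);
	return 0;
}

static int tx_desc_map_edma(void *desc, unsigned long pa,
			    unsigned int len, int ring_index)
{
	printf("edma map: ring %d, %u bytes\n", ring_index, len);
	return 0;
}

int main(void)
{
	bool use_enhanced_dma_hw = false;	/* chosen once, at init time */
	struct txrx_ops ops = {
		.tx_desc_map = use_enhanced_dma_hw ?
			tx_desc_map_edma : tx_desc_map_legacy,
	};

	/* hot path: a single indirect call, no per-packet HW-type branch */
	ops.tx_desc_map(NULL, 0x1000, 1514, 2);
	return 0;
}
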
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 5f07717acc2c..9d83be481839 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -18,6 +18,9 @@
#ifndef WIL6210_TXRX_H
#define WIL6210_TXRX_H
+#include "wil6210.h"
+#include "txrx_edma.h"
+
#define BUF_SW_OWNED (1)
#define BUF_HW_OWNED (0)
@@ -29,19 +32,13 @@
/* Tx/Rx path */
-/* Common representation of physical address in Vring */
-struct vring_dma_addr {
- __le32 addr_low;
- __le16 addr_high;
-} __packed;
-
-static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
+static inline dma_addr_t wil_desc_addr(struct wil_ring_dma_addr *addr)
{
return le32_to_cpu(addr->addr_low) |
((u64)le16_to_cpu(addr->addr_high) << 32);
}
-static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
+static inline void wil_desc_addr_set(struct wil_ring_dma_addr *addr,
dma_addr_t pa)
{
addr->addr_low = cpu_to_le32(lower_32_bits(pa));
@@ -294,7 +291,7 @@ struct vring_tx_mac {
*/
struct vring_tx_dma {
u32 d0;
- struct vring_dma_addr addr;
+ struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11; /* 0..6: mac_length; 7:ip_version */
u8 error; /* 0..2: err; 3..7: reserved; */
@@ -428,7 +425,7 @@ struct vring_rx_mac {
struct vring_rx_dma {
u32 d0;
- struct vring_dma_addr addr;
+ struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11;
u8 error;
@@ -441,14 +438,24 @@ struct vring_tx_desc {
struct vring_tx_dma dma;
} __packed;
+union wil_tx_desc {
+ struct vring_tx_desc legacy;
+ struct wil_tx_enhanced_desc enhanced;
+} __packed;
+
struct vring_rx_desc {
struct vring_rx_mac mac;
struct vring_rx_dma dma;
} __packed;
-union vring_desc {
- struct vring_tx_desc tx;
- struct vring_rx_desc rx;
+union wil_rx_desc {
+ struct vring_rx_desc legacy;
+ struct wil_rx_enhanced_desc enhanced;
+} __packed;
+
+union wil_ring_desc {
+ union wil_tx_desc tx;
+ union wil_rx_desc rx;
} __packed;
static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
@@ -493,6 +500,11 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
return WIL_GET_BITS(d->mac.d0, 28, 31);
}
+static inline int wil_rxdesc_retry(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 31, 31);
+}
+
static inline int wil_rxdesc_key_id(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->mac.d1, 4, 5);
@@ -528,6 +540,76 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
return (void *)skb->cb;
}
+static inline int wil_ring_is_empty(struct wil_ring *ring)
+{
+ return ring->swhead == ring->swtail;
+}
+
+static inline u32 wil_ring_next_tail(struct wil_ring *ring)
+{
+ return (ring->swtail + 1) % ring->size;
+}
+
+static inline void wil_ring_advance_head(struct wil_ring *ring, int n)
+{
+ ring->swhead = (ring->swhead + n) % ring->size;
+}
+
+static inline int wil_ring_is_full(struct wil_ring *ring)
+{
+ return wil_ring_next_tail(ring) == ring->swhead;
+}
+
+static inline bool wil_need_txstat(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (void *)skb->data;
+
+ return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
+}
+
+static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
+{
+ if (unlikely(wil_need_txstat(skb)))
+ skb_complete_wifi_ack(skb, acked);
+ else
+ acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+}
+
+/* Used space in Tx ring */
+static inline int wil_ring_used_tx(struct wil_ring *ring)
+{
+ u32 swhead = ring->swhead;
+ u32 swtail = ring->swtail;
+
+ return (ring->size + swhead - swtail) % ring->size;
+}
+
+/* Available space in Tx ring */
+static inline int wil_ring_avail_tx(struct wil_ring *ring)
+{
+ return ring->size - wil_ring_used_tx(ring) - 1;
+}
+
+static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil)
+{
+ /* In Enhanced DMA ring 0 is reserved for RX */
+ return wil->use_enhanced_dma_hw ? 1 : 0;
+}
+
+/* similar to the ieee80211_ version, but FC contains only the 1st byte */
+static inline int wil_is_back_req(u8 fc)
+{
+ return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
+
+/* wil_val_in_range - check if value in [min,max) */
+static inline bool wil_val_in_range(int val, int min, int max)
+{
+ return val >= min && val < max;
+}
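
The inline helpers above implement the usual single-producer ring arithmetic: swhead and swtail chase each other modulo the ring size, and one slot is deliberately sacrificed so a full ring can be told apart from an empty one (hence the "- 1" in wil_ring_avail_tx()). A small self-contained sketch of the same arithmetic, with hypothetical names:

#include <stdio.h>

/* Minimal head/tail ring bookkeeping, same convention as the inlines
 * above: "used" wraps modulo size, "avail" keeps one slot unused.
 */
struct ring {
	unsigned int size, swhead, swtail;
};

static int ring_used(const struct ring *r)
{
	return (r->size + r->swhead - r->swtail) % r->size;
}

static int ring_avail(const struct ring *r)
{
	return r->size - ring_used(r) - 1;
}

int main(void)
{
	struct ring r = { .size = 8, .swhead = 2, .swtail = 6 };

	/* wrapped case: used = (8 + 2 - 6) % 8 = 4, avail = 8 - 4 - 1 = 3 */
	printf("used=%d avail=%d\n", ring_used(&r), ring_avail(&r));
	return 0;
}
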
+
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
@@ -536,5 +618,9 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
int size, u16 ssn);
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
struct wil_tid_ampdu_rx *r);
+void wil_tx_data_init(struct wil_ring_tx_data *txdata);
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
+void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_sta_info *sta);
#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
new file mode 100644
index 000000000000..bca61cb44c37
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -0,0 +1,1608 @@
+/*
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/prefetch.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "wil6210.h"
+#include "txrx_edma.h"
+#include "txrx.h"
+#include "trace.h"
+
+#define WIL_EDMA_MAX_DATA_OFFSET (2)
+/* RX buffer size must be aligned to 4 bytes */
+#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
+
+static void wil_tx_desc_unmap_edma(struct device *dev,
+ union wil_tx_desc *desc,
+ struct wil_ctx *ctx)
+{
+ struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
+ dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
+ u16 dmalen = le16_to_cpu(d->dma.length);
+
+ switch (ctx->mapped_as) {
+ case wil_mapped_as_single:
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ case wil_mapped_as_page:
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
+static int wil_find_free_sring(struct wil6210_priv *wil)
+{
+ int i;
+
+ for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
+ if (!wil->srings[i].va)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static void wil_sring_free(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz;
+
+ if (!sring || !sring->va)
+ return;
+
+ sz = sring->elem_size * sring->size;
+
+ wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
+ sz, sring->va, &sring->pa);
+
+ dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
+ sring->pa = 0;
+ sring->va = NULL;
+}
+
+static int wil_sring_alloc(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = sring->elem_size * sring->size;
+
+ wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);
+
+ if (sz == 0) {
+ wil_err(wil, "Cannot allocate a zero size status ring\n");
+ return -EINVAL;
+ }
+
+ sring->swhead = 0;
+
+ /* Status messages are allocated and initialized to 0. This is necessary
+ * since DR bit should be initialized to 0.
+ */
+ sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
+ if (!sring->va)
+ return -ENOMEM;
+
+ wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
+ &sring->pa);
+
+ return 0;
+}
+
+static int wil_tx_init_edma(struct wil6210_priv *wil)
+{
+ int ring_id = wil_find_free_sring(wil);
+ struct wil_status_ring *sring;
+ int rc;
+ u16 status_ring_size;
+
+ if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+ wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+ wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+ status_ring_size = 1 << wil->tx_status_ring_order;
+
+ wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
+ status_ring_size, ring_id);
+
+ if (ring_id < 0)
+ return ring_id;
+
+ /* Allocate Tx status ring. Tx descriptor rings will be
+ * allocated on WMI connect event
+ */
+ sring = &wil->srings[ring_id];
+
+ sring->is_rx = false;
+ sring->size = status_ring_size;
+ sring->elem_size = sizeof(struct wil_ring_tx_status);
+ rc = wil_sring_alloc(wil, sring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_tx_sring_cfg(wil, ring_id);
+ if (rc)
+ goto out_free;
+
+ sring->desc_rdy_pol = 1;
+ wil->tx_sring_idx = ring_id;
+
+ return 0;
+out_free:
+ wil_sring_free(wil, sring);
+ return rc;
+}
+
+/**
+ * Allocate one skb for Rx descriptor RING
+ */
+static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
+ struct wil_ring *ring, u32 i)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+ dma_addr_t pa;
+ u16 buff_id;
+ struct list_head *active = &wil->rx_buff_mgmt.active;
+ struct list_head *free = &wil->rx_buff_mgmt.free;
+ struct wil_rx_buff *rx_buff;
+ struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
+ struct sk_buff *skb;
+ struct wil_rx_enhanced_desc dd, *d = &dd;
+ struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
+ &ring->va[i].rx.enhanced;
+
+ if (unlikely(list_empty(free))) {
+ wil->rx_buff_mgmt.free_list_empty_cnt++;
+ return -EAGAIN;
+ }
+
+ skb = dev_alloc_skb(sz);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_put(skb, sz);
+
+ /**
+ * Make sure that the network stack calculates checksum for packets
+ * which failed the HW checksum calculation
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ /* Get the buffer ID - the index of the rx buffer in the buff_arr */
+ rx_buff = list_first_entry(free, struct wil_rx_buff, list);
+ buff_id = rx_buff->id;
+
+ /* Move a buffer from the free list to the active list */
+ list_move(&rx_buff->list, active);
+
+ buff_arr[buff_id].skb = skb;
+
+ wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+ d->dma.length = cpu_to_le16(sz);
+ d->mac.buff_id = cpu_to_le16(buff_id);
+ *_d = *d;
+
+ /* Save the physical address in skb->cb for later use in dma_unmap */
+ memcpy(skb->cb, &pa, sizeof(pa));
+
+ return 0;
+}
+
+static inline
+void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
+{
+ memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
+ sring->elem_size);
+}
+
+static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
+{
+ sring->swhead = (sring->swhead + 1) % sring->size;
+ if (sring->swhead == 0)
+ sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
+}
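
wil_sring_advance_swhead() flips desc_rdy_pol on every wrap-around: the hardware writes a "descriptor ready" bit into each status slot, and toggling the expected polarity once per lap lets software distinguish a freshly written message from one left over from the previous pass, without having to clear consumed slots. A toy model of that scheme (illustrative only, not driver code):

#include <stdio.h>

/* Toy status ring: the expected DR-bit polarity flips on every
 * software wrap-around, as in wil_sring_advance_swhead() above.
 */
struct sring {
	unsigned int size, swhead;
	unsigned int desc_rdy_pol;	/* polarity expected from HW */
};

static void sring_advance(struct sring *s)
{
	s->swhead = (s->swhead + 1) % s->size;
	if (s->swhead == 0)
		s->desc_rdy_pol = 1 - s->desc_rdy_pol;
}

int main(void)
{
	struct sring s = { .size = 4, .swhead = 0, .desc_rdy_pol = 1 };
	int i;

	for (i = 0; i < 9; i++) {
		printf("slot %u expects DR=%u\n", s.swhead, s.desc_rdy_pol);
		sring_advance(&s);
	}
	return 0;
}
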
+
+static int wil_rx_refill_edma(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ u32 next_head;
+ int rc = 0;
+ u32 swtail = *ring->edma_rx_swtail.va;
+
+ for (; next_head = wil_ring_next_head(ring), (next_head != swtail);
+ ring->swhead = next_head) {
+ rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
+ if (unlikely(rc)) {
+ if (rc == -EAGAIN)
+ wil_dbg_txrx(wil, "No free buffer ID found\n");
+ else
+ wil_err_ratelimited(wil,
+ "Error %d in refill desc[%d]\n",
+ rc, ring->swhead);
+ break;
+ }
+ }
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ wil_w(wil, ring->hwtail, ring->swhead);
+
+ return rc;
+}
+
+static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
+ struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ u32 next_tail;
+ u32 swhead = (ring->swhead + 1) % ring->size;
+ dma_addr_t pa;
+ u16 dmalen;
+
+ for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead);
+ ring->swtail = next_tail) {
+ struct wil_rx_enhanced_desc dd, *d = &dd;
+ struct wil_rx_enhanced_desc *_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[ring->swtail].rx.enhanced;
+ struct sk_buff *skb;
+ u16 buff_id;
+
+ *d = *_d;
+ pa = wil_rx_desc_get_addr_edma(&d->dma);
+ dmalen = le16_to_cpu(d->dma.length);
+ dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
+
+ /* Extract the SKB from the rx_buff management array */
+ buff_id = __le16_to_cpu(d->mac.buff_id);
+ if (buff_id >= wil->rx_buff_mgmt.size) {
+ wil_err(wil, "invalid buff_id %d\n", buff_id);
+ continue;
+ }
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
+ if (unlikely(!skb))
+ wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ else
+ kfree_skb(skb);
+
+ /* Move the buffer from the active to the free list */
+ list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+ }
+}
+
+static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return;
+
+ /* Move all the buffers to the free list in case active list is
+ * not empty in order to release all SKBs before deleting the array
+ */
+ wil_move_all_rx_buff_to_free_list(wil, ring);
+
+ kfree(wil->rx_buff_mgmt.buff_arr);
+ wil->rx_buff_mgmt.buff_arr = NULL;
+}
+
+static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
+ size_t size)
+{
+ struct wil_rx_buff *buff_arr;
+ struct list_head *active = &wil->rx_buff_mgmt.active;
+ struct list_head *free = &wil->rx_buff_mgmt.free;
+ int i;
+
+ wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
+ GFP_KERNEL);
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return -ENOMEM;
+
+ /* Set list heads */
+ INIT_LIST_HEAD(active);
+ INIT_LIST_HEAD(free);
+
+ /* Linkify the list */
+ buff_arr = wil->rx_buff_mgmt.buff_arr;
+ for (i = 0; i < size; i++) {
+ list_add(&buff_arr[i].list, free);
+ buff_arr[i].id = i;
+ }
+
+ wil->rx_buff_mgmt.size = size;
+
+ return 0;
+}
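
wil_init_rx_buff_arr() pre-allocates an array of RX buffer slots whose index doubles as the 16-bit buff_id carried in status messages; unused IDs sit on the free list and move to the active list when their buffer is posted to hardware. The fragment below models only that ID bookkeeping, with a trivial stack standing in for the kernel list_head machinery; all names are hypothetical.

#include <stdio.h>

#define NUM_RX_BUFF 4

/* Free buff_id pool: each RX buffer's ID is its array index; an ID is
 * taken when the buffer is posted to the HW ring and returned on
 * completion. Sketch only - the driver uses list_move() between a
 * free and an active list instead of a stack.
 */
static int free_ids[NUM_RX_BUFF];
static int free_top;

static void init_ids(void)
{
	for (free_top = 0; free_top < NUM_RX_BUFF; free_top++)
		free_ids[free_top] = free_top;
}

static int get_buff_id(void)
{
	return free_top ? free_ids[--free_top] : -1;	/* -1: pool empty */
}

static void put_buff_id(int id)
{
	free_ids[free_top++] = id;
}

int main(void)
{
	int id;

	init_ids();
	id = get_buff_id();
	printf("posted buff_id %d to HW\n", id);
	put_buff_id(id);	/* completion: ID goes back to the pool */
	return 0;
}
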
+
+static int wil_init_rx_sring(struct wil6210_priv *wil,
+ u16 status_ring_size,
+ size_t elem_size,
+ u16 ring_id)
+{
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ int rc;
+
+ wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size,
+ ring_id);
+
+ memset(&sring->rx_data, 0, sizeof(sring->rx_data));
+
+ sring->is_rx = true;
+ sring->size = status_ring_size;
+ sring->elem_size = elem_size;
+ rc = wil_sring_alloc(wil, sring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_rx_sring_add(wil, ring_id);
+ if (rc)
+ goto out_free;
+
+ sring->desc_rdy_pol = 1;
+
+ return 0;
+out_free:
+ wil_sring_free(wil, sring);
+ return rc;
+}
+
+static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
+ struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = ring->size * sizeof(ring->va[0]);
+
+ wil_dbg_misc(wil, "alloc_desc_ring:\n");
+
+ BUILD_BUG_ON(sizeof(ring->va[0]) != 32);
+
+ ring->swhead = 0;
+ ring->swtail = 0;
+ ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
+ if (!ring->ctx)
+ goto err;
+
+ ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
+ if (!ring->va)
+ goto err_free_ctx;
+
+ if (ring->is_rx) {
+ sz = sizeof(*ring->edma_rx_swtail.va);
+ ring->edma_rx_swtail.va =
+ dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
+ GFP_KERNEL);
+ if (!ring->edma_rx_swtail.va)
+ goto err_free_va;
+ }
+
+ wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
+ ring->is_rx ? "RX" : "TX",
+ ring->size, ring->va, &ring->pa, ring->ctx);
+
+ return 0;
+err_free_va:
+ dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
+ (void *)ring->va, ring->pa);
+ ring->va = NULL;
+err_free_ctx:
+ kfree(ring->ctx);
+ ring->ctx = NULL;
+err:
+ return -ENOMEM;
+}
+
+static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz;
+ int ring_index = 0;
+
+ if (!ring->va)
+ return;
+
+ sz = ring->size * sizeof(ring->va[0]);
+
+ lockdep_assert_held(&wil->mutex);
+ if (ring->is_rx) {
+ wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
+ ring->size, ring->va,
+ &ring->pa, ring->ctx);
+
+ wil_move_all_rx_buff_to_free_list(wil, ring);
+ goto out;
+ }
+
+ /* TX ring */
+ ring_index = ring - wil->ring_tx;
+
+ wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
+ ring_index, ring->size, ring->va,
+ &ring->pa, ring->ctx);
+
+ while (!wil_ring_is_empty(ring)) {
+ struct wil_ctx *ctx;
+
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ struct wil_tx_enhanced_desc *_d =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[ring->swtail].tx.enhanced;
+
+ ctx = &ring->ctx[ring->swtail];
+ if (!ctx) {
+ wil_dbg_txrx(wil,
+ "ctx(%d) was already completed\n",
+ ring->swtail);
+ ring->swtail = wil_ring_next_tail(ring);
+ continue;
+ }
+ *d = *_d;
+ wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+ ring->swtail = wil_ring_next_tail(ring);
+ }
+
+out:
+ dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
+ kfree(ring->ctx);
+ ring->pa = 0;
+ ring->va = NULL;
+ ring->ctx = NULL;
+}
+
+static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
+ int status_ring_id)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+
+ wil_dbg_misc(wil, "init RX desc ring\n");
+
+ ring->size = desc_ring_size;
+ ring->is_rx = true;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
+ if (rc)
+ goto out_free;
+
+ return 0;
+out_free:
+ wil_ring_free_edma(wil, ring);
+ return rc;
+}
+
+static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid,
+ int *cid, int *mid, u16 *seq,
+ int *mcast, int *retry)
+{
+ struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+ *tid = wil_rx_status_get_tid(s);
+ *cid = wil_rx_status_get_cid(s);
+ *mid = wil_rx_status_get_mid(s);
+ *seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
+ *mcast = wil_rx_status_get_mcast(s);
+ *retry = wil_rx_status_get_retry(s);
+}
+
+static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
+ int *security)
+{
+ struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+ *cid = wil_rx_status_get_cid(s);
+ *security = wil_rx_status_get_security(s);
+}
+
+static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct wil_rx_status_extended *st;
+ int cid, tid, key_id, mc;
+ struct wil_sta_info *s;
+ struct wil_tid_crypto_rx *c;
+ struct wil_tid_crypto_rx_single *cc;
+ const u8 *pn;
+
+ /* In HW reorder, HW is responsible for crypto check */
+ if (wil->use_rx_hw_reordering)
+ return 0;
+
+ st = wil_skb_rxstatus(skb);
+
+ cid = wil_rx_status_get_cid(st);
+ tid = wil_rx_status_get_tid(st);
+ key_id = wil_rx_status_get_key_id(st);
+ mc = wil_rx_status_get_mcast(st);
+ s = &wil->sta[cid];
+ c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
+ cc = &c->key_id[key_id];
+ pn = (u8 *)&st->ext.pn_15_0;
+
+ if (!cc->key_set) {
+ wil_err_ratelimited(wil,
+ "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+ cid, tid, mc, key_id);
+ return -EINVAL;
+ }
+
+ if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+ wil_err_ratelimited(wil,
+ "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+ cid, tid, mc, key_id, pn, cc->pn);
+ return -EINVAL;
+ }
+ memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+ return 0;
+}
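
wil_rx_crypto_check_edma() guards against replay by requiring the received packet number to advance strictly beyond the last accepted PN for the (CID, TID, key_id) tuple; the bytes are compared back-to-front via reverse_memcmp(), which suggests the PN is stored least-significant byte first. A stand-alone sketch of such a reversed counter comparison, under that byte-order assumption:

#include <stdio.h>
#include <string.h>

#define PN_LEN 6	/* IEEE80211_GCMP_PN_LEN */

/* Compare two counters stored least-significant byte first by walking
 * them from the last (most significant) byte downwards - the same idea
 * as the driver's reverse_memcmp(). Illustrative only.
 */
static int pn_cmp(const unsigned char *a, const unsigned char *b, size_t len)
{
	while (len--) {
		if (a[len] != b[len])
			return a[len] - b[len];
	}
	return 0;
}

int main(void)
{
	unsigned char last[PN_LEN] = { 0x05, 0, 0, 0, 0, 0 };
	unsigned char rx[PN_LEN]   = { 0x04, 0, 0, 0, 0, 0 };

	if (pn_cmp(rx, last, PN_LEN) <= 0)
		printf("replay: PN did not advance, drop the frame\n");
	else
		memcpy(last, rx, PN_LEN);	/* accept and remember new PN */
	return 0;
}
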
+
+static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
+{
+ struct wil_status_ring *sring;
+ struct wil_rx_status_extended msg1;
+ void *msg = &msg1;
+ u8 dr_bit;
+ int i;
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ sring = &wil->srings[i];
+ if (!sring->va)
+ continue;
+
+ wil_get_next_rx_status_msg(sring, msg);
+ dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+
+ /* Check if there are unhandled RX status messages */
+ if (dr_bit == sring->desc_rdy_pol)
+ return false;
+ }
+
+ return true;
+}
+
+static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
+{
+ wil->rx_buf_len = rx_large_buf ?
+ WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
+}
+
+static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
+{
+ u16 status_ring_size;
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+ size_t elem_size = wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended);
+ int i;
+ u16 max_rx_pl_per_desc;
+
+ /* In SW reorder one must use extended status messages */
+ if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
+ wil_err(wil,
+ "compressed RX status cannot be used with SW reorder\n");
+ return -EINVAL;
+ }
+
+ if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+ wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+ wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+
+ status_ring_size = 1 << wil->rx_status_ring_order;
+
+ wil_dbg_misc(wil,
+ "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
+ desc_ring_size, status_ring_size, elem_size);
+
+ wil_rx_buf_len_init_edma(wil);
+
+ max_rx_pl_per_desc = ALIGN(wil->rx_buf_len, 4);
+
+ /* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
+ if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
+ wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
+
+ wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
+ wil->num_rx_status_rings);
+
+ rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
+ if (rc)
+ return rc;
+
+ /* Allocate status ring */
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ int sring_id = wil_find_free_sring(wil);
+
+ if (sring_id < 0) {
+ rc = -EFAULT;
+ goto err_free_status;
+ }
+ rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
+ sring_id);
+ if (rc)
+ goto err_free_status;
+ }
+
+ /* Allocate descriptor ring */
+ rc = wil_init_rx_desc_ring(wil, desc_ring_size,
+ WIL_DEFAULT_RX_STATUS_RING_ID);
+ if (rc)
+ goto err_free_status;
+
+ if (wil->rx_buff_id_count >= status_ring_size) {
+ wil_info(wil,
+ "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
+ wil->rx_buff_id_count, status_ring_size,
+ status_ring_size - 1);
+ wil->rx_buff_id_count = status_ring_size - 1;
+ }
+
+ /* Allocate Rx buffer array */
+ rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
+ if (rc)
+ goto err_free_desc;
+
+ /* Fill descriptor ring with credits */
+ rc = wil_rx_refill_edma(wil);
+ if (rc)
+ goto err_free_rx_buff_arr;
+
+ return 0;
+err_free_rx_buff_arr:
+ wil_free_rx_buff_arr(wil);
+err_free_desc:
+ wil_ring_free_edma(wil, ring);
+err_free_status:
+ for (i = 0; i < wil->num_rx_status_rings; i++)
+ wil_sring_free(wil, &wil->srings[i]);
+
+ return rc;
+}
+
+static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
+ int size, int cid, int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ lockdep_assert_held(&wil->mutex);
+
+ wil_dbg_misc(wil,
+ "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
+ ring_id, cid, tid, wil->tx_sring_idx);
+
+ wil_tx_data_init(txdata);
+ ring->size = size;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[ring_id][0] = cid;
+ wil->ring2cid_tid[ring_id][1] = tid;
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+
+ rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
+ if (rc) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
+ goto out_free;
+ }
+
+ if (txdata->dot1x_open && agg_wsize >= 0)
+ wil_addba_tx_request(wil, ring_id, agg_wsize);
+
+ return 0;
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
+ spin_unlock_bh(&txdata->lock);
+ wil_ring_free_edma(wil, ring);
+ wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[ring_id][1] = 0;
+
+ out:
+ return rc;
+}
+
+/* This function is used only for RX SW reorder */
+static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
+ struct sk_buff *skb, struct wil_net_stats *stats)
+{
+ u8 ftype;
+ u8 fc1;
+ int mid;
+ int tid;
+ u16 seq;
+ struct wil6210_vif *vif;
+
+ ftype = wil_rx_status_get_frame_type(wil, msg);
+ if (ftype == IEEE80211_FTYPE_DATA)
+ return 0;
+
+ fc1 = wil_rx_status_get_fc1(wil, msg);
+ mid = wil_rx_status_get_mid(msg);
+ tid = wil_rx_status_get_tid(msg);
+ seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
+ vif = wil->vifs[mid];
+
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
+ return -EAGAIN;
+ }
+
+ wil_dbg_txrx(wil,
+ "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ if (stats)
+ stats->rx_non_data_frame++;
+ if (wil_is_back_req(fc1)) {
+ wil_dbg_txrx(wil,
+ "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
+ mid, cid, tid, seq);
+ wil_rx_bar(wil, vif, cid, tid, seq);
+ } else {
+ u32 sz = wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended);
+
+ /* print all the info again; this print alone can be enabled
+ * without the overhead of printing every Rx frame
+ */
+ wil_dbg_txrx(wil,
+ "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)msg, sz, false);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+ }
+
+ return -EAGAIN;
+}
+
+static int wil_rx_error_check_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb,
+ struct wil_net_stats *stats)
+{
+ int error;
+ int l2_rx_status;
+ int l3_rx_status;
+ int l4_rx_status;
+ void *msg = wil_skb_rxstatus(skb);
+
+ error = wil_rx_status_get_error(msg);
+ if (!error) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return 0;
+ }
+
+ l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
+ if (l2_rx_status != 0) {
+ wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
+ l2_rx_status);
+ /* Due to a HW issue, a KEY error will trigger a MIC error */
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) {
+ wil_dbg_txrx(wil,
+ "L2 MIC/KEY error, dropping packet\n");
+ stats->rx_mic_error++;
+ }
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) {
+ wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n");
+ stats->rx_key_error++;
+ }
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) {
+ wil_dbg_txrx(wil,
+ "L2 REPLAY error, dropping packet\n");
+ stats->rx_replay++;
+ }
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) {
+ wil_dbg_txrx(wil,
+ "L2 AMSDU error, dropping packet\n");
+ stats->rx_amsdu_error++;
+ }
+ return -EFAULT;
+ }
+
+ l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
+ l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
+ if (!l3_rx_status && !l4_rx_status)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* If HW reports bad checksum, let IP stack re-check it
+ * For example, the HW doesn't understand the Microsoft IP stack,
+ * which mis-calculates the TCP checksum: when it should be 0x0,
+ * it writes 0xffff in violation of RFC 1624
+ */
+ else
+ stats->rx_csum_err++;
+
+ return 0;
+}
+
+static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct wil_rx_status_extended msg1;
+ void *msg = &msg1;
+ u16 buff_id;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ struct wil_ring_rx_data *rxdata = &sring->rx_data;
+ unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+ struct wil_net_stats *stats = NULL;
+ u16 dmalen;
+ int cid;
+ bool eop, headstolen;
+ int delta;
+ u8 dr_bit;
+ u8 data_offset;
+ struct wil_rx_status_extended *s;
+ u16 sring_idx = sring - wil->srings;
+
+ BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
+
+again:
+ wil_get_next_rx_status_msg(sring, msg);
+ dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+
+ /* Completed handling all the ready status messages */
+ if (dr_bit != sring->desc_rdy_pol)
+ return NULL;
+
+ /* Extract the buffer ID from the status message */
+ buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
+ wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
+ buff_id, sring->swhead);
+ wil_sring_advance_swhead(sring);
+ goto again;
+ }
+
+ wil_sring_advance_swhead(sring);
+
+ /* Extract the SKB from the rx_buff management array */
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
+ if (!skb) {
+ wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ goto again;
+ }
+
+ memcpy(&pa, skb->cb, sizeof(pa));
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
+
+ trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
+ msg);
+ wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
+ buff_id, sring_idx, dmalen);
+ wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
+ /* Move the buffer from the active list to the free list */
+ list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+
+ eop = wil_rx_status_get_eop(msg);
+
+ cid = wil_rx_status_get_cid(msg);
+ if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
+ wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
+ cid, sring->swhead);
+ rxdata->skipping = true;
+ goto skipping;
+ }
+ stats = &wil->sta[cid].stats;
+
+ if (unlikely(skb->len < ETH_HLEN)) {
+ wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
+ stats->rx_short_frame++;
+ rxdata->skipping = true;
+ goto skipping;
+ }
+
+ if (unlikely(dmalen > sz)) {
+ wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ stats->rx_large_frame++;
+ rxdata->skipping = true;
+ }
+
+skipping:
+ /* skipping indicates if a certain SKB should be dropped.
+ * It is set in case there is an error on the current SKB or in case
+ * of RX chaining: as long as we manage to merge the SKBs it will
+ * be false. Once we have a bad SKB or we fail to merge SKBs,
+ * it will be set to the !EOP value of the current SKB.
+ * This guarantees that all the following SKBs until EOP will also
+ * get dropped.
+ */
+ if (unlikely(rxdata->skipping)) {
+ kfree_skb(skb);
+ if (rxdata->skb) {
+ kfree_skb(rxdata->skb);
+ rxdata->skb = NULL;
+ }
+ rxdata->skipping = !eop;
+ goto again;
+ }
+
+ skb_trim(skb, dmalen);
+
+ prefetch(skb->data);
+
+ if (!rxdata->skb) {
+ rxdata->skb = skb;
+ } else {
+ if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
+ &delta))) {
+ kfree_skb_partial(skb, headstolen);
+ } else {
+ wil_err(wil, "failed to merge skbs!\n");
+ kfree_skb(skb);
+ kfree_skb(rxdata->skb);
+ rxdata->skb = NULL;
+ rxdata->skipping = !eop;
+ goto again;
+ }
+ }
+
+ if (!eop)
+ goto again;
+
+ /* when we reach this point, rxdata->skb always contains a full packet */
+ skb = rxdata->skb;
+ rxdata->skb = NULL;
+ rxdata->skipping = false;
+
+ if (stats) {
+ stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
+ if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
+ stats->rx_per_mcs[stats->last_mcs_rx]++;
+ }
+
+ if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
+ wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
+ kfree_skb(skb);
+ goto again;
+ }
+
+ /* Compensate for the HW data alignment according to the status
+ * message
+ */
+ data_offset = wil_rx_status_get_data_offset(msg);
+ if (data_offset == 0xFF ||
+ data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
+ wil_err(wil, "Unexpected data offset %d\n", data_offset);
+ kfree_skb(skb);
+ goto again;
+ }
+
+ skb_pull(skb, data_offset);
+
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ /* Has to be done after dma_unmap_single as skb->cb is also
+ * used for holding the pa
+ */
+ s = wil_skb_rxstatus(skb);
+ memcpy(s, msg, sring->elem_size);
+
+ return skb;
+}
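
The reap function above reassembles packets that span several RX descriptors: non-EOP segments are coalesced into rxdata->skb, and any error sets the "skipping" flag so that everything up to and including that packet's EOP segment is discarded. A stripped-down model of that EOP/skipping state machine, with plain integers standing in for skbs:

#include <stdio.h>
#include <stdbool.h>

/* Each status message carries (ok, eop); segments accumulate until EOP,
 * and one bad segment poisons the rest of its packet. Illustrative only.
 */
struct seg { bool ok, eop; };

int main(void)
{
	struct seg segs[] = {
		{ true, false }, { false, false }, { true, true },	/* dropped */
		{ true, false }, { true, true },			/* delivered */
	};
	bool skipping = false;
	int acc = 0, i;

	for (i = 0; i < (int)(sizeof(segs) / sizeof(segs[0])); i++) {
		if (skipping || !segs[i].ok) {
			acc = 0;
			skipping = !segs[i].eop;	/* keep dropping until EOP */
			continue;
		}
		acc++;					/* "coalesce" the segment */
		if (segs[i].eop) {
			printf("deliver packet of %d segment(s)\n", acc);
			acc = 0;
		}
	}
	return 0;
}
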
+
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
+{
+ struct net_device *ndev;
+ struct wil_ring *ring = &wil->ring_rx;
+ struct wil_status_ring *sring;
+ struct sk_buff *skb;
+ int i;
+
+ if (unlikely(!ring->va)) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_txrx(wil, "rx_handle\n");
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ sring = &wil->srings[i];
+ if (unlikely(!sring->va)) {
+ wil_err(wil,
+ "Rx IRQ while Rx status ring %d not yet initialized\n",
+ i);
+ continue;
+ }
+
+ while ((*quota > 0) &&
+ (NULL != (skb =
+ wil_sring_reap_rx_edma(wil, sring)))) {
+ (*quota)--;
+ if (wil->use_rx_hw_reordering) {
+ void *msg = wil_skb_rxstatus(skb);
+ int mid = wil_rx_status_get_mid(msg);
+ struct wil6210_vif *vif = wil->vifs[mid];
+
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil,
+ "RX desc invalid mid %d",
+ mid);
+ kfree_skb(skb);
+ continue;
+ }
+ ndev = vif_to_ndev(vif);
+ wil_netif_rx_any(skb, ndev);
+ } else {
+ wil_rx_reorder(wil, skb);
+ }
+ }
+
+ wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+ }
+
+ wil_rx_refill_edma(wil);
+}
+
+static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
+ dma_addr_t pa,
+ u32 len,
+ int ring_index)
+{
+ struct wil_tx_enhanced_desc *d =
+ (struct wil_tx_enhanced_desc *)&desc->enhanced;
+
+ memset(d, 0, sizeof(struct wil_tx_enhanced_desc));
+
+ wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+
+ /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+ d->dma.length = cpu_to_le16((u16)len);
+ d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
+ * 3 - eth mode
+ */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
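
wil_tx_desc_map_edma() builds the enhanced descriptor by shifting the ring index into the QID field and selecting the L2 translation type and SNAP-header insertion in mac.d[2]. The snippet below shows only that shift-and-or packing pattern; the bit positions are invented for the sketch and do not match the hardware's layout.

#include <stdio.h>

/* Made-up field positions, illustrating the packing style only. */
#define QID_POS			16
#define L2_TRANSLATION_POS	0
#define SNAP_HDR_INS_POS	3

int main(void)
{
	unsigned int ring_index = 2;
	unsigned int d0, d2;

	d0 = ring_index << QID_POS;		/* which TX queue/ring */
	d2 = (1u << SNAP_HDR_INS_POS) |		/* insert SNAP header */
	     (0x3u << L2_TRANSLATION_POS);	/* L2 translation: eth mode */

	printf("d[0]=0x%08x d[2]=0x%08x\n", d0, d2);
	return 0;
}
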
+
+static inline void
+wil_get_next_tx_status_msg(struct wil_status_ring *sring,
+ struct wil_ring_tx_status *msg)
+{
+ struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
+ (sring->va + (sring->elem_size * sring->swhead));
+
+ *msg = *_msg;
+}
+
+/**
+ * Clean up transmitted skb's from the Tx descriptor RING.
+ * Return number of descriptors cleared.
+ */
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct net_device *ndev;
+ struct device *dev = wil_to_dev(wil);
+ struct wil_ring *ring = NULL;
+ struct wil_ring_tx_data *txdata;
+ /* Total number of completed descriptors in all descriptor rings */
+ int desc_cnt = 0;
+ int cid;
+ struct wil_net_stats *stats = NULL;
+ struct wil_tx_enhanced_desc *_d;
+ unsigned int ring_id;
+ unsigned int num_descs;
+ int i;
+ u8 dr_bit; /* Descriptor Ready bit */
+ struct wil_ring_tx_status msg;
+ struct wil6210_vif *vif;
+ int used_before_complete;
+ int used_new;
+
+ wil_get_next_tx_status_msg(sring, &msg);
+ dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+
+ /* Process completion messages while DR bit has the expected polarity */
+ while (dr_bit == sring->desc_rdy_pol) {
+ num_descs = msg.num_descriptors;
+ if (!num_descs) {
+ wil_err(wil, "invalid num_descs 0\n");
+ goto again;
+ }
+
+ /* Find the corresponding descriptor ring */
+ ring_id = msg.ring_id;
+
+ if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
+ wil_err(wil, "invalid ring id %d\n", ring_id);
+ goto again;
+ }
+ ring = &wil->ring_tx[ring_id];
+ if (unlikely(!ring->va)) {
+ wil_err(wil, "Tx irq[%d]: ring not initialized\n",
+ ring_id);
+ goto again;
+ }
+ txdata = &wil->ring_tx_data[ring_id];
+ if (unlikely(!txdata->enabled)) {
+ wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
+ goto again;
+ }
+ vif = wil->vifs[txdata->mid];
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
+ txdata->mid, ring_id);
+ goto again;
+ }
+
+ ndev = vif_to_ndev(vif);
+
+ cid = wil->ring2cid_tid[ring_id][0];
+ if (cid < WIL6210_MAX_CID)
+ stats = &wil->sta[cid].stats;
+
+ wil_dbg_txrx(wil,
+ "tx_status: completed desc_ring (%d), num_descs (%d)\n",
+ ring_id, num_descs);
+
+ used_before_complete = wil_ring_used_tx(ring);
+
+ for (i = 0 ; i < num_descs; ++i) {
+ struct wil_ctx *ctx = &ring->ctx[ring->swtail];
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ u16 dmalen;
+ struct sk_buff *skb = ctx->skb;
+
+ _d = (struct wil_tx_enhanced_desc *)
+ &ring->va[ring->swtail].tx.enhanced;
+ *d = *_d;
+
+ dmalen = le16_to_cpu(d->dma.length);
+ trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
+ wil_dbg_txrx(wil,
+ "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
+ ring_id, ring->swtail, dmalen,
+ msg.status);
+ wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)&msg, sizeof(msg),
+ false);
+
+ wil_tx_desc_unmap_edma(dev,
+ (union wil_tx_desc *)d,
+ ctx);
+
+ if (skb) {
+ if (likely(msg.status == 0)) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ if (stats) {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+
+ wil_tx_latency_calc(wil, skb,
+ &wil->sta[cid]);
+ }
+ } else {
+ ndev->stats.tx_errors++;
+ if (stats)
+ stats->tx_errors++;
+ }
+ wil_consume_skb(skb, msg.status == 0);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ /* Make sure the ctx is zeroed before updating the tail
+ * to prevent a case where wil_tx_ring will see
+ * this descriptor as used and handle it before ctx zero
+ * is completed.
+ */
+ wmb();
+
+ ring->swtail = wil_ring_next_tail(ring);
+
+ desc_cnt++;
+ }
+
+ /* performance monitoring */
+ used_new = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used_new, used_before_complete)) {
+ wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+ ring_id, used_before_complete, used_new);
+ txdata->last_idle = get_cycles();
+ }
+
+again:
+ wil_sring_advance_swhead(sring);
+
+ wil_get_next_tx_status_msg(sring, &msg);
+ dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+ }
+
+ /* shall we wake net queues? */
+ if (desc_cnt)
+ wil_update_net_queues(wil, vif, NULL, false);
+
+ /* Update the HW tail ptr (RD ptr) */
+ wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+
+ return desc_cnt;
+}
+
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
+ int tso_desc_type, bool is_ipv4,
+ int tcp_hdr_len,
+ int skb_net_hdr_len,
+ int mss)
+{
+ /* Number of descriptors */
+ d->mac.d[2] |= 1;
+ /* Maximum Segment Size */
+ d->mac.tso_mss |= cpu_to_le16(mss >> 2);
+ /* L4 header len: TCP header length */
+ d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
+ /* EOP, TSO desc type, Segmentation enable,
+ * Insert IPv4 and TCP / UDP Checksum
+ */
+ d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
+ tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
+ BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
+ /* Calculate pseudo-header */
+ d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
+ /* IP Header Length */
+ d->dma.ip_length |= skb_net_hdr_len;
+ /* MAC header length and IP address family */
+ d->dma.b11 |= ETH_HLEN |
+ is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+}
+
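+/* Summary of the TSO descriptor chain built by __wil_tx_ring_tso_edma() below:
+ * a header descriptor (wil_tso_type_hdr), then the remaining linear head
+ * (wil_tso_type_first, or wil_tso_type_lst when there are no fragments), then
+ * the page fragments (wil_tso_type_mid, with the final fragment marked
+ * wil_tso_type_lst). Each descriptor gets the offload setup from the
+ * function above.
+ */
+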
+static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
+ int len, uint i, int tso_desc_type,
+ skb_frag_t *frag, struct wil_ring *ring,
+ struct sk_buff *skb, bool is_ipv4,
+ int tcp_hdr_len, int skb_net_hdr_len,
+ int mss, int *descs_used)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
+ &ring->va[i].tx.enhanced;
+ struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
+ int ring_index = ring - wil->ring_tx;
+ dma_addr_t pa;
+
+ if (len == 0)
+ return 0;
+
+ if (!frag) {
+ pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
+ ring->ctx[i].mapped_as = wil_mapped_as_single;
+ } else {
+ pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+ ring->ctx[i].mapped_as = wil_mapped_as_page;
+ }
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ wil_err(wil, "TSO: Skb DMA map error\n");
+ return -EINVAL;
+ }
+
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
+ len, ring_index);
+ wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
+ tcp_hdr_len,
+ skb_net_hdr_len, mss);
+
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ if (tso_desc_type == wil_tso_type_lst)
+ ring->ctx[i].skb = skb_get(skb);
+
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ *_desc = *d;
+ (*descs_used)++;
+
+ return 0;
+}
+
+static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct wil_ring *ring,
+ struct sk_buff *skb)
+{
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
+ int used, avail = wil_ring_avail_tx(ring);
+ int f, hdrlen, headlen;
+ int gso_type;
+ bool is_ipv4;
+ u32 swhead = ring->swhead;
+ int descs_used = 0; /* total number of used descriptors */
+ int rc = -EINVAL;
+ int tcp_hdr_len;
+ int skb_net_hdr_len;
+ int mss = skb_shinfo(skb)->gso_size;
+
+ wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
+ ring_index);
+
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
+ if (unlikely(avail < min_desc_required)) {
+ wil_err_ratelimited(wil,
+ "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+ ring_index, min_desc_required);
+ return -ENOMEM;
+ }
+
+ gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+ switch (gso_type) {
+ case SKB_GSO_TCPV4:
+ is_ipv4 = true;
+ break;
+ case SKB_GSO_TCPV6:
+ is_ipv4 = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return -EINVAL;
+
+ /* tcp header length and skb network header length are fixed for all
+ * packet's descriptors - read them once here
+ */
+ tcp_hdr_len = tcp_hdrlen(skb);
+ skb_net_hdr_len = skb_network_header_len(skb);
+
+ /* First descriptor must contain the header only
+ * Header Length = MAC header len + IP header len + TCP header len
+ */
+ hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
+ wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
+ hdrlen);
+ rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
+ wil_tso_type_hdr, NULL, ring, skb,
+ is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ return -EINVAL;
+
+ /* Second descriptor contains the head */
+ headlen = skb_headlen(skb) - hdrlen;
+ wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
+ rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
+ (swhead + descs_used) % ring->size,
+ (nr_frags != 0) ? wil_tso_type_first :
+ wil_tso_type_lst, NULL, ring, skb,
+ is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ goto mem_error;
+
+ /* Rest of the descriptors are from the SKB fragments */
+ for (f = 0; f < nr_frags; f++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ int len = frag->size;
+
+ wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
+ len, descs_used);
+
+ rc = wil_tx_tso_gen_desc(wil, NULL, len,
+ (swhead + descs_used) % ring->size,
+ (f != nr_frags - 1) ?
+ wil_tso_type_mid : wil_tso_type_lst,
+ frag, ring, skb, is_ipv4,
+ tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ goto mem_error;
+ }
+
+ /* performance monitoring */
+ used = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used, used + descs_used)) {
+ txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ ring_index, used, used + descs_used);
+ }
+
+ /* advance swhead */
+ wil_ring_advance_head(ring, descs_used);
+ wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
+ wil_w(wil, ring->hwtail, ring->swhead);
+
+ return 0;
+
+mem_error:
+ while (descs_used > 0) {
+ struct device *dev = wil_to_dev(wil);
+ struct wil_ctx *ctx;
+ int i = (swhead + descs_used - 1) % ring->size;
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ struct wil_tx_enhanced_desc *_desc =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[i].tx.enhanced;
+
+ *d = *_desc;
+ ctx = &ring->ctx[i];
+ wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+ memset(ctx, 0, sizeof(*ctx));
+ descs_used--;
+ }
+ return rc;
+}
+
+static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
+ int size)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ int rc;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
+ ring_id, wil->tx_sring_idx);
+
+ lockdep_assert_held(&wil->mutex);
+
+ wil_tx_data_init(txdata);
+ ring->size = size;
+ ring->is_rx = false;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
+ wil->ring2cid_tid[ring_id][1] = 0; /* TID */
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+
+ rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
+ if (rc)
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->enabled = 0;
+ txdata->dot1x_open = false;
+ spin_unlock_bh(&txdata->lock);
+ wil_ring_free_edma(wil, ring);
+
+out:
+ return rc;
+}
+
+static void wil_tx_fini_edma(struct wil6210_priv *wil)
+{
+ struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+ wil_dbg_misc(wil, "free TX sring\n");
+
+ wil_sring_free(wil, sring);
+}
+
+static void wil_rx_data_free(struct wil_status_ring *sring)
+{
+ if (!sring)
+ return;
+
+ kfree_skb(sring->rx_data.skb);
+ sring->rx_data.skb = NULL;
+}
+
+static void wil_rx_fini_edma(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ int i;
+
+ wil_dbg_misc(wil, "rx_fini_edma\n");
+
+ wil_ring_free_edma(wil, ring);
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ wil_rx_data_free(&wil->srings[i]);
+ wil_sring_free(wil, &wil->srings[i]);
+ }
+
+ wil_free_rx_buff_arr(wil);
+}
+
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
+{
+ wil->txrx_ops.configure_interrupt_moderation =
+ wil_configure_interrupt_moderation_edma;
+ /* TX ops */
+ wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
+ wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
+ wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
+ wil->txrx_ops.tx_init = wil_tx_init_edma;
+ wil->txrx_ops.tx_fini = wil_tx_fini_edma;
+ wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
+ wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
+ wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
+ /* RX ops */
+ wil->txrx_ops.rx_init = wil_rx_init_edma;
+ wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
+ wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
+ wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
+ wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
+ wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
+ wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
+ wil->txrx_ops.rx_fini = wil_rx_fini_edma;
+}
+
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
new file mode 100644
index 000000000000..a7fe9292fda3
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_EDMA_H
+#define WIL6210_TXRX_EDMA_H
+
+#include "wil6210.h"
+
+/* limit status ring size in range [ring size..max ring size] */
+#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN)
+#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
+/* RX sring order should be bigger than RX ring order */
+#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11)
+#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12)
+#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536)
+
+#define WIL_DEFAULT_RX_STATUS_RING_ID 0
+#define WIL_RX_DESC_RING_ID 0
+#define WIL_RX_STATUS_IRQ_IDX 0
+#define WIL_TX_STATUS_IRQ_IDX 1
+
+#define WIL_EDMA_AGG_WATERMARK (0xffff)
+#define WIL_EDMA_AGG_WATERMARK_POS (16)
+
+#define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50)
+#define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */
+
+/* Error field */
+#define WIL_RX_EDMA_ERROR_MIC (1)
+#define WIL_RX_EDMA_ERROR_KEY (2) /* Key missing */
+#define WIL_RX_EDMA_ERROR_REPLAY (3)
+#define WIL_RX_EDMA_ERROR_AMSDU (4)
+#define WIL_RX_EDMA_ERROR_FCS (7)
+
+#define WIL_RX_EDMA_ERROR_L3_ERR (BIT(0) | BIT(1))
+#define WIL_RX_EDMA_ERROR_L4_ERR (BIT(0) | BIT(1))
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_BIT BIT(11)
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK 0x7
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK 0xf
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_POS 2
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_POS 4
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5
+
+#define WIL_RX_EDMA_MID_VALID_BIT BIT(22)
+
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6
+
+#define WIL_EDMA_DESC_TX_CFG_EOP_POS 0
+#define WIL_EDMA_DESC_TX_CFG_EOP_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS 3
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_LEN 2
+
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS 6
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS 7
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS 15
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_LEN 1
+
+/* Enhanced Rx descriptor - MAC part
+ * [dword 0] : Reserved
+ * [dword 1] : Reserved
+ * [dword 2] : Reserved
+ * [dword 3]
+ * bit 0..15 : Buffer ID
+ * bit 16..31 : Reserved
+ */
+struct wil_ring_rx_enhanced_mac {
+ u32 d[3];
+ __le16 buff_id;
+ u16 reserved;
+} __packed;
+
+/* Enhanced Rx descriptor - DMA part
+ * [dword 0] - Reserved
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ * bit 16..31 : Reserved
+ * [dword 3]
+ * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ * bit 16..31 : length
+ */
+struct wil_ring_rx_enhanced_dma {
+ u32 d0;
+ struct wil_ring_dma_addr addr;
+ u16 w5;
+ __le16 addr_high_high;
+ __le16 length;
+} __packed;
+
+struct wil_rx_enhanced_desc {
+ struct wil_ring_rx_enhanced_mac mac;
+ struct wil_ring_rx_enhanced_dma dma;
+} __packed;
+
+/* Enhanced Tx descriptor - DMA part
+ * [dword 0]
+ * Same as legacy
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
+ * offload feature
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ * bit 16..31 : length
+ */
+struct wil_ring_tx_enhanced_dma {
+ u8 l4_hdr_len;
+ u8 cmd;
+ u16 w1;
+ struct wil_ring_dma_addr addr;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ __le16 addr_high_high;
+ __le16 length;
+} __packed;
+
+/* Enhanced Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrupt_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..30 : reserved1:3
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5..14 : reserved0:10
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..23 : reserved0:2
+ * bit 24 : Dest ID extension:1
+ * bit 25..31 : reserved0:7
+ * [dword 3]
+ * bit 0..15 : tso_mss:16
+ * bit 16..31 : descriptor_scratchpad:16 - mailbox between driver and ucode
+ */
+struct wil_ring_tx_enhanced_mac {
+ u32 d[3];
+ __le16 tso_mss;
+ u16 scratchpad;
+} __packed;
+
+struct wil_tx_enhanced_desc {
+ struct wil_ring_tx_enhanced_mac mac;
+ struct wil_ring_tx_enhanced_dma dma;
+} __packed;
+
+#define TX_STATUS_DESC_READY_POS 7
+
+/* Enhanced TX status message
+ * [dword 0]
+ * bit 0.. 7 : Number of Descriptors:8 - The number of descriptors that
+ * are used to form the packets. It is needed for WB when
+ * releasing the packet
+ * bit 8..15 : tx_ring_id:8 The transmission ring ID that is related to
+ * the message
+ * bit 16..23 : Status:8 - The TX status Code
+ * 0x0 - A successful transmission
+ * 0x1 - Retry expired
+ * 0x2 - Lifetime Expired
+ * 0x3 - Released
+ * 0x4-0xFF - Reserved
+ * bit 24..30 : Reserved:7
+ * bit 31 : Descriptor Ready bit:1 - It is initialized to
+ * zero by the driver when the ring is created. It is set by the HW
+ * to one for each completed status message. On each wrap around,
+ * the DR bit value is flipped.
+ * [dword 1]
+ * bit 0..31 : timestamp:32 - Set when MPDU is transmitted.
+ * [dword 2]
+ * bit 0.. 4 : MCS:5 - The transmitted MCS value
+ * bit 5 : Reserved:1
+ * bit 6.. 7 : CB mode:2 - 0-DMG 1-EDMG 2-Wide
+ * bit 8..12 : QID:5 - The QID that was used for the transmission
+ * bit 13..15 : Reserved:3
+ * bit 16..20 : Num of MSDUs:5 - Number of MSDUs in the aggregation
+ * bit 21..22 : Reserved:2
+ * bit 23 : Retry:1 - An indication that the transmission was retried
+ * bit 24..31 : TX-Sector:8 - the antenna sector that was used for
+ * transmission
+ * [dword 3]
+ * bit 0..11 : Sequence number:12 - The Sequence Number that was used
+ * for the MPDU transmission
+ * bit 12..31 : Reserved:20
+ */
+struct wil_ring_tx_status {
+ u8 num_descriptors;
+ u8 ring_id;
+ u8 status;
+ u8 desc_ready; /* Only the last bit should be set */
+ u32 timestamp;
+ u32 d2;
+ u16 seq_number; /* Only the first 12 bits */
+ u16 w7;
+} __packed;
+
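+/* Illustrative sketch (hypothetical helper, not part of the driver): checking
+ * the Descriptor Ready bit of a TX status message against the ring's expected
+ * polarity, mirroring the loop condition in wil_tx_sring_handler().
+ */
+static inline bool wil_tx_status_msg_is_ready(struct wil_ring_tx_status *msg,
+ struct wil_status_ring *sring)
+{
+ u8 dr_bit = msg->desc_ready >> TX_STATUS_DESC_READY_POS;
+
+ return dr_bit == sring->desc_rdy_pol;
+}
+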
+/* Enhanced Rx status message - compressed part
+ * [dword 0]
+ * bit 0.. 2 : L2 Rx Status:3 - The L2 packet reception Status
+ * 0-Success, 1-MIC Error, 2-Key Error, 3-Replay Error,
+ * 4-A-MSDU Error, 5-Reserved, 6-Reserved, 7-FCS Error
+ * bit 3.. 4 : L3 Rx Status:2 - Bit0 - L3I - L3 identified and checksum
+ * calculated, Bit1- L3Err - IPv4 Checksum Error
+ * bit 5.. 6 : L4 Rx Status:2 - Bit0 - L4I - L4 identified and checksum
+ * calculated, Bit1- L4Err - TCP/UDP Checksum Error
+ * bit 7 : Reserved:1
+ * bit 8..19 : Flow ID:12 - MSDU flow ID
+ * bit 20..21 : MID:2 - The MAC ID
+ * bit 22 : MID_V:1 - The MAC ID field is valid
+ * bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
+ * bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
+ * bit 25 : BC:1 - The received MPDU is broadcast
+ * bit 26 : MC:1 - The received MPDU is multicast
+ * bit 27 : Raw:1 - The MPDU received with no translation
+ * bit 28 : Sec:1 - The FC control (b14) - Frame Protected
+ * bit 29 : Error:1 - An error is set when (L2 status != 0) ||
+ * (L3 status == 3) || (L4 status == 3)
+ * bit 30 : EOP:1 - End of MSDU signaling. It is set to mark the end
+ * of the transfer, otherwise the status indicates buffer
+ * only completion.
+ * bit 31 : Descriptor Ready bit:1 - It is initialized to
+ * zero by the driver when the ring is created. It is set
+ * by the HW to one for each completed status message.
+ * On each wrap around, the DR bit value is flipped.
+ * [dword 1]
+ * bit 0.. 5 : MAC Len:6 - The number of bytes that are used for L2 header
+ * bit 6..11 : IPLEN:6 - The number of DW that are used for L3 header
+ * bit 12..15 : I4Len:4 - The number of DW that are used for L4 header
+ * bit 16..21 : MCS:6 - The received MCS field from the PLCP Header
+ * bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide
+ * bit 24..27 : Data Offset:4 - The data offset, a code that describes the
+ * payload shift from the beginning of the buffer:
+ * 0 - 0 Bytes, 3 - 2 Bytes
+ * bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field
+ * bit 29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field
+ * bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU
+ * bit 31 : Key ID:1 - The extracted Key ID from the encryption header
+ * [dword 2]
+ * bit 0..15 : Buffer ID:16 - The Buffer Identifier
+ * bit 16..31 : Length:16 - It indicates the valid bytes that are stored
+ * in the current descriptor buffer. For multiple buffer
+ * descriptor, SW need to sum the total descriptor length
+ * in all buffers to produce the packet length
+ * [dword 3]
+ * bit 0..31 : timestamp:32 - The MPDU Timestamp.
+ */
+struct wil_rx_status_compressed {
+ u32 d0;
+ u32 d1;
+ __le16 buff_id;
+ __le16 length;
+ u32 timestamp;
+} __packed;
+
+/* Enhanced Rx status message - extension part
+ * [dword 0]
+ * bit 0.. 4 : QID:5 - The Queue Identifier that the packet is received
+ * from
+ * bit 5.. 7 : Reserved:3
+ * bit 8..11 : TID:4 - The QoS (b3-0) TID Field
+ * bit 12..15 : Source index:4 - The Source index that was found
+ * during Parsing the TA. This field is used to define the
+ * source of the packet
+ * bit 16..18 : Destination index:3 - The Destination index that
+ * was found during Parsing the RA.
+ * bit 19..20 : DS Type:2 - The FC Control (b9-8) - From / To DS
+ * bit 21..22 : MIC ICR:2 - this signal tells the DMA to assert an
+ * interrupt after it writes the packet
+ * bit 23 : ESOP:1 - The QoS (b4) ESOP field
+ * bit 24 : RDG:1
+ * bit 25..31 : Reserved:7
+ * [dword 1]
+ * bit 0.. 1 : Frame Type:2 - The FC Control (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 2.. 5 : Sub type:4 - The FC Control (b7-4) - Frame Subtype
+ * bit 6..11 : Ext sub type:6 - The FC Control (b11-8) - Frame Extended
+ * Subtype
+ * bit 12..13 : ACK Policy:2 - The QoS (b6-5) ACK Policy fields
+ * bit 14 : DECRYPT_BYP:1 - The MPDU is bypass by the decryption unit
+ * bit 15..23 : Reserved:9
+ * bit 24..31 : RSSI/SNR:8 - The RSSI / SNR measurement for the received
+ * MPDU
+ * [dword 2]
+ * bit 0..11 : SN:12 - The received Sequence number field
+ * bit 12..15 : Reserved:4
+ * bit 16..31 : PN bits [15:0]:16
+ * [dword 3]
+ * bit 0..31 : PN bits [47:16]:32
+ */
+struct wil_rx_status_extension {
+ u32 d0;
+ u32 d1;
+ __le16 seq_num; /* only lower 12 bits */
+ u16 pn_15_0;
+ u32 pn_47_16;
+} __packed;
+
+struct wil_rx_status_extended {
+ struct wil_rx_status_compressed comp;
+ struct wil_rx_status_extension ext;
+} __packed;
+
+static inline void *wil_skb_rxstatus(struct sk_buff *skb)
+{
+ return (void *)skb->cb;
+}
+
+static inline __le16 wil_rx_status_get_length(void *msg)
+{
+ return ((struct wil_rx_status_compressed *)msg)->length;
+}
+
+static inline u8 wil_rx_status_get_mcs(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 16, 21);
+}
+
+static inline u16 wil_rx_status_get_flow_id(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 8, 19);
+}
+
+static inline u8 wil_rx_status_get_mcast(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 26, 26);
+}
+
+/**
+ * In case of a DLPF miss, the flow ID should be parsed as follows:
+ * dest_id:2
+ * src_id :3 - cid
+ * tid:3
+ * Otherwise:
+ * tid:4
+ * cid:4
+ */
+
+static inline u8 wil_rx_status_get_cid(void *msg)
+{
+ u16 val = wil_rx_status_get_flow_id(msg);
+
+ if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+ /* CID is in bits 2..4 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_CID_POS) &
+ WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+ else
+ /* CID is in bits 4..7 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_HIT_CID_POS) &
+ WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK;
+}
+
+static inline u8 wil_rx_status_get_tid(void *msg)
+{
+ u16 val = wil_rx_status_get_flow_id(msg);
+
+ if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+ /* TID is in bits 5..7 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) &
+ WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+ else
+ /* TID is in bits 0..3 */
+ return val & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+}
+
+static inline int wil_rx_status_get_desc_rdy_bit(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 31, 31);
+}
+
+static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 30, 30);
+}
+
+static inline __le16 wil_rx_status_get_buff_id(void *msg)
+{
+ return ((struct wil_rx_status_compressed *)msg)->buff_id;
+}
+
+static inline u8 wil_rx_status_get_data_offset(void *msg)
+{
+ u8 val = WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 24, 27);
+
+ switch (val) {
+ case 0: return 0;
+ case 3: return 2;
+ default: return 0xFF;
+ }
+}
+
+static inline int wil_rx_status_get_frame_type(struct wil6210_priv *wil,
+ void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return IEEE80211_FTYPE_DATA;
+
+ return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+ 0, 1) << 2;
+}
+
+static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return 0;
+
+ return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+ 0, 5) << 2;
+}
+
+static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return 0;
+
+ return ((struct wil_rx_status_extended *)msg)->ext.seq_num;
+}
+
+static inline u8 wil_rx_status_get_retry(void *msg)
+{
+ /* retry bit is missing in EDMA HW. return 1 to be on the safe side */
+ return 1;
+}
+
+static inline int wil_rx_status_get_mid(void *msg)
+{
+ if (!(((struct wil_rx_status_compressed *)msg)->d0 &
+ WIL_RX_EDMA_MID_VALID_BIT))
+ return 0; /* use the default MID */
+
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 20, 21);
+}
+
+static inline int wil_rx_status_get_error(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 29, 29);
+}
+
+static inline int wil_rx_status_get_l2_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 0, 2);
+}
+
+static inline int wil_rx_status_get_l3_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 3, 4);
+}
+
+static inline int wil_rx_status_get_l4_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 5, 6);
+}
+
+static inline int wil_rx_status_get_security(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 28, 28);
+}
+
+static inline u8 wil_rx_status_get_key_id(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 31, 31);
+}
+
+static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg)
+{
+ return WIL_GET_BITS(msg->d2, 0, 4);
+}
+
+static inline u32 wil_ring_next_head(struct wil_ring *ring)
+{
+ return (ring->swhead + 1) % ring->size;
+}
+
+static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr,
+ __le16 *addr_high_high,
+ dma_addr_t pa)
+{
+ addr->addr_low = cpu_to_le32(lower_32_bits(pa));
+ addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
+ *addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16));
+}
+
+static inline
+dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma)
+{
+ return le32_to_cpu(dma->addr.addr_low) |
+ ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+ ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
+
+static inline
+dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma)
+{
+ return le32_to_cpu(dma->addr.addr_low) |
+ ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+ ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
+
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil);
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+ struct wil_status_ring *sring);
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota);
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil);
+
+#endif /* WIL6210_TXRX_EDMA_H */
+
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index b623510c6f6c..17c294b1ead1 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -24,6 +24,7 @@
#include <net/cfg80211.h>
#include <linux/timex.h>
#include <linux/types.h>
+#include <linux/irqreturn.h>
#include "wmi.h"
#include "wil_platform.h"
#include "fw.h"
@@ -36,6 +37,11 @@ extern bool rx_align_2;
extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
+extern bool ftm_mode;
+
+struct wil6210_priv;
+struct wil6210_vif;
+union wil_tx_desc;
#define WIL_NAME "wil6210"
@@ -45,11 +51,17 @@ extern bool disable_ap_sme;
#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
+#define WIL_FW_NAME_TALYN "wil6436.fw"
+#define WIL_FW_NAME_FTM_TALYN "wil6436_ftm.fw"
+#define WIL_BRD_NAME_TALYN "wil6436.brd"
+
#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
+#define WIL_NUM_LATENCY_BINS 200
+
/* maximum number of virtual interfaces the driver supports
* (including the main interface)
*/
@@ -80,6 +92,10 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
+#define WIL_MAX_AMPDU_SIZE_128 (128 * 1024) /* FW/HW limit */
+#define WIL_MAX_AGG_WSIZE_64 (64) /* FW/HW limit */
+#define WIL6210_MAX_STATUS_RINGS (8)
+
/* Hardware offload block adds the following:
* 26 bytes - 3-address QoS data header
* 8 bytes - IV + EIV (for GCMP)
@@ -203,7 +219,9 @@ struct RGF_ICR {
#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
#define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0)
- #define BIT_NO_FLASH_INDICATION BIT(8)
+ #define BIT_OTP_SIGNATURE_ERR_TALYN_MB BIT(0)
+ #define BIT_OTP_HW_SECTION_DONE_TALYN_MB BIT(2)
+ #define BIT_NO_FLASH_INDICATION BIT(8)
#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec)
#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0)
#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4)
@@ -284,6 +302,8 @@ struct RGF_ICR {
#define BIT_DMA_ITR_RX_IDL_CNT_CTL_FOREVER BIT(2)
#define BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR BIT(3)
#define BIT_DMA_ITR_RX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
+#define RGF_DMA_MISC_CTL (0x881d6c)
+ #define BIT_OFUL34_RDY_VALID_BUG_FIX_EN BIT(7)
#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
@@ -305,20 +325,49 @@ struct RGF_ICR {
#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
+#define RGF_OTP_QC_SECURED (0x8a0038)
+ #define BIT_BOOT_FROM_ROM BIT(31)
+
+/* eDMA */
+#define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8)
+
+#define RGF_INT_CTRL_INT_GEN_CFG_0 (0x8bc000)
+#define RGF_INT_CTRL_INT_GEN_CFG_1 (0x8bc004)
+#define RGF_INT_GEN_TIME_UNIT_LIMIT (0x8bc0c8)
+
+#define RGF_INT_GEN_CTRL (0x8bc0ec)
+ #define BIT_CONTROL_0 BIT(0)
+
+/* eDMA status interrupts */
+#define RGF_INT_GEN_RX_ICR (0x8bc0f4)
+ #define BIT_RX_STATUS_IRQ BIT(WIL_RX_STATUS_IRQ_IDX)
+#define RGF_INT_GEN_TX_ICR (0x8bc110)
+ #define BIT_TX_STATUS_IRQ BIT(WIL_TX_STATUS_IRQ_IDX)
+#define RGF_INT_CTRL_RX_INT_MASK (0x8bc12c)
+#define RGF_INT_CTRL_TX_INT_MASK (0x8bc130)
+
+#define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134)
+
#define USER_EXT_USER_PMU_3 (0x88d00c)
#define BIT_PMU_DEVICE_RDY BIT(0)
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
#define JTAG_DEV_ID_SPARROW (0x2632072f)
#define JTAG_DEV_ID_TALYN (0x7e0e1)
+ #define JTAG_DEV_ID_TALYN_MB (0x1007e0e1)
#define RGF_USER_REVISION_ID (0x88afe4)
#define RGF_USER_REVISION_ID_MASK (3)
#define REVISION_ID_SPARROW_B0 (0x0)
#define REVISION_ID_SPARROW_D0 (0x3)
+#define RGF_OTP_MAC_TALYN_MB (0x8a0304)
#define RGF_OTP_MAC (0x8a0620)
+/* Talyn-MB */
+#define RGF_USER_USER_CPU_0_TALYN_MB (0x8c0138)
+#define RGF_USER_MAC_CPU_0_TALYN_MB (0x8c0154)
+
/* crash codes for FW/Ucode stored here */
/* ASSERT RGFs */
@@ -332,6 +381,7 @@ enum {
HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */
+ HW_VER_TALYN_MB /* JTAG_DEV_ID_TALYN_MB */
};
/* popular locations */
@@ -349,7 +399,14 @@ enum {
/* Hardware definitions end */
#define SPARROW_FW_MAPPING_TABLE_SIZE 10
#define TALYN_FW_MAPPING_TABLE_SIZE 13
-#define MAX_FW_MAPPING_TABLE_SIZE 13
+#define TALYN_MB_FW_MAPPING_TABLE_SIZE 19
+#define MAX_FW_MAPPING_TABLE_SIZE 19
+
+/* Common representation of physical address in wil ring */
+struct wil_ring_dma_addr {
+ __le32 addr_low;
+ __le16 addr_high;
+} __packed;
struct fw_map {
u32 from; /* linker address - from, inclusive */
@@ -357,12 +414,14 @@ struct fw_map {
u32 host; /* PCI/Host address - BAR0 + 0x880000 */
const char *name; /* for debugfs */
bool fw; /* true if FW mapping, false if UCODE mapping */
+ bool crash_dump; /* true if should be dumped during crash dump */
};
/* array size should be in sync with actual definition in the wmi.c */
extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
extern const struct fw_map sparrow_d0_mac_rgf_ext;
extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map talyn_mb_fw_mapping[TALYN_MB_FW_MAPPING_TABLE_SIZE];
extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
/**
@@ -438,7 +497,7 @@ enum { /* for wil_ctx.mapped_as */
};
/**
- * struct wil_ctx - software context for Vring descriptor
+ * struct wil_ctx - software context for ring descriptor
*/
struct wil_ctx {
struct sk_buff *skb;
@@ -446,22 +505,123 @@ struct wil_ctx {
u8 mapped_as;
};
-union vring_desc;
+struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */
+ u32 *va;
+ dma_addr_t pa;
+};
-struct vring {
+/**
+ * A general ring structure, used for RX and TX.
+ * In legacy DMA it represents the vring,
+ * In enhanced DMA it represents the descriptor ring (vrings are handled by FW)
+ */
+struct wil_ring {
dma_addr_t pa;
- volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
- u16 size; /* number of vring_desc elements */
+ volatile union wil_ring_desc *va;
+ u16 size; /* number of wil_ring_desc elements */
u32 swtail;
u32 swhead;
u32 hwtail; /* write here to inform hw */
struct wil_ctx *ctx; /* ctx[size] - software context */
+ struct wil_desc_ring_rx_swtail edma_rx_swtail;
+ bool is_rx;
+};
+
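+/* Illustrative sketch (hypothetical helpers; the driver's real ones live in
+ * its txrx headers): the usual swhead/swtail arithmetic for a cyclic ring of
+ * @size elements, where the producer advances swhead and the completion path
+ * advances swtail.
+ */
+static inline u32 wil_ring_used_sketch(const struct wil_ring *ring)
+{
+ return (ring->size + ring->swhead - ring->swtail) % ring->size;
+}
+
+static inline u32 wil_ring_avail_sketch(const struct wil_ring *ring)
+{
+ /* one slot is kept unused so a full ring is distinguishable from empty */
+ return ring->size - wil_ring_used_sketch(ring) - 1;
+}
+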
+/**
+ * Additional data for Rx ring.
+ * Used for enhanced DMA RX chaining.
+ */
+struct wil_ring_rx_data {
+ /* the skb being assembled */
+ struct sk_buff *skb;
+ /* true if we are skipping a bad fragmented packet */
+ bool skipping;
+ u16 buff_size;
+};
+
+/**
+ * Status ring structure, used for enhanced DMA completions for RX and TX.
+ */
+struct wil_status_ring {
+ dma_addr_t pa;
+ void *va; /* pointer to ring_[tr]x_status elements */
+ u16 size; /* number of status elements */
+ size_t elem_size; /* status element size in bytes */
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ bool is_rx;
+ u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
+ struct wil_ring_rx_data rx_data;
+};
+
+#define WIL_STA_TID_NUM (16)
+#define WIL_MCS_MAX (12) /* Maximum MCS supported */
+
+struct wil_net_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long tx_errors;
+ u32 tx_latency_min_us;
+ u32 tx_latency_max_us;
+ u64 tx_latency_total_us;
+ unsigned long rx_dropped;
+ unsigned long rx_non_data_frame;
+ unsigned long rx_short_frame;
+ unsigned long rx_large_frame;
+ unsigned long rx_replay;
+ unsigned long rx_mic_error;
+ unsigned long rx_key_error; /* eDMA specific */
+ unsigned long rx_amsdu_error; /* eDMA specific */
+ unsigned long rx_csum_err;
+ u16 last_mcs_rx;
+ u64 rx_per_mcs[WIL_MCS_MAX + 1];
};
/**
- * Additional data for Tx Vring
+ * struct wil_txrx_ops - different TX/RX ops for legacy and enhanced
+ * DMA flow
*/
-struct vring_tx_data {
+struct wil_txrx_ops {
+ void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
+ /* TX ops */
+ int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id,
+ int size, int cid, int tid);
+ void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring);
+ int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size);
+ int (*tx_init)(struct wil6210_priv *wil);
+ void (*tx_fini)(struct wil6210_priv *wil);
+ int (*tx_desc_map)(union wil_tx_desc *desc, dma_addr_t pa,
+ u32 len, int ring_index);
+ void (*tx_desc_unmap)(struct device *dev,
+ union wil_tx_desc *desc,
+ struct wil_ctx *ctx);
+ int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb);
+ irqreturn_t (*irq_tx)(int irq, void *cookie);
+ /* RX ops */
+ int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
+ void (*rx_fini)(struct wil6210_priv *wil);
+ int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
+ u8 tid, u8 token, u16 status, bool amsdu,
+ u16 agg_wsize, u16 timeout);
+ void (*get_reorder_params)(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid, int *cid,
+ int *mid, u16 *seq, int *mcast, int *retry);
+ void (*get_netif_rx_params)(struct sk_buff *skb,
+ int *cid, int *security);
+ int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb);
+ int (*rx_error_check)(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_net_stats *stats);
+ bool (*is_rx_idle)(struct wil6210_priv *wil);
+ irqreturn_t (*irq_rx)(int irq, void *cookie);
+};
+
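+/* Usage sketch (illustrative; error handling and the ring_size parameter are
+ * abbreviated): callers dispatch through this table so one code path serves
+ * both legacy and enhanced DMA, e.g.
+ *
+ * wil->txrx_ops.configure_interrupt_moderation(wil);
+ * rc = wil->txrx_ops.rx_init(wil, ring_size);
+ * if (!rc)
+ * rc = wil->txrx_ops.tx_init(wil);
+ */
+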
+/**
+ * Additional data for Tx ring
+ */
+struct wil_ring_tx_data {
bool dot1x_open;
int enabled;
cycles_t idle, last_idle, begin;
@@ -503,6 +663,8 @@ struct pci_dev;
* @drop_dup: duplicate frames dropped for this reorder buffer
* @drop_old: old frames dropped for this reorder buffer
* @first_time: true when this buffer used 1-st time
+ * @mcast_last_seq: sequence number (SN) of last received multicast packet
+ * @drop_dup_mcast: duplicate multicast frames dropped for this reorder buffer
*/
struct wil_tid_ampdu_rx {
struct sk_buff **reorder_buf;
@@ -516,6 +678,8 @@ struct wil_tid_ampdu_rx {
unsigned long long drop_dup;
unsigned long long drop_old;
bool first_time; /* is it 1-st time this buffer used? */
+ u16 mcast_last_seq; /* multicast dup detection */
+ unsigned long long drop_dup_mcast;
};
/**
@@ -550,24 +714,6 @@ enum wil_sta_status {
wil_sta_connected = 2,
};
-#define WIL_STA_TID_NUM (16)
-#define WIL_MCS_MAX (12) /* Maximum MCS supported */
-
-struct wil_net_stats {
- unsigned long rx_packets;
- unsigned long tx_packets;
- unsigned long rx_bytes;
- unsigned long tx_bytes;
- unsigned long tx_errors;
- unsigned long rx_dropped;
- unsigned long rx_non_data_frame;
- unsigned long rx_short_frame;
- unsigned long rx_large_frame;
- unsigned long rx_replay;
- u16 last_mcs_rx;
- u64 rx_per_mcs[WIL_MCS_MAX + 1];
-};
-
/**
* struct wil_sta_info - data for peer
*
@@ -581,6 +727,14 @@ struct wil_sta_info {
u8 mid;
enum wil_sta_status status;
struct wil_net_stats stats;
+ /**
+ * 20 latency bins. 1st bin counts packets with latency
+ * of 0..tx_latency_res, last bin counts packets with latency
+ * of 19*tx_latency_res and above.
+ * tx_latency_res is configured from "tx_latency" debug-fs.
+ */
+ u64 *tx_latency_bins;
+ struct wmi_link_stats_basic fw_stats_basic;
/* Rx BACK */
struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
spinlock_t tid_rx_lock; /* guarding tid_rx array */
@@ -681,7 +835,7 @@ struct wil6210_vif {
u8 hidden_ssid; /* relevant in AP mode */
u32 ap_isolate; /* no intra-BSS communication */
bool pbss;
- int bcast_vring;
+ int bcast_ring;
struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
int locally_generated_disc; /* relevant in STA mode */
struct timer_list connect_timer;
@@ -695,6 +849,39 @@ struct wil6210_vif {
struct mutex probe_client_mutex; /* protect @probe_client_pending */
struct work_struct probe_client_worker;
int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
+ bool fw_stats_ready; /* per-cid statistics are ready inside sta_info */
+ u64 fw_stats_tsf; /* measurement timestamp */
+};
+
+/**
+ * RX buffer allocated for enhanced DMA RX descriptors
+ */
+struct wil_rx_buff {
+ struct sk_buff *skb;
+ struct list_head list;
+ int id;
+};
+
+/**
+ * During Rx completion processing, the driver extracts a buffer ID that is
+ * used as an index into the rx_buff_mgmt.buff_arr array; the SKB is then
+ * handed to the network stack and the buffer is moved from the 'active'
+ * list to the 'free' list.
+ * During Rx refill, SKBs are attached to free buffers and moved to the
+ * 'active' list.
+ */
+struct wil_rx_buff_mgmt {
+ struct wil_rx_buff *buff_arr;
+ size_t size; /* number of items in buff_arr */
+ struct list_head active;
+ struct list_head free;
+ unsigned long free_list_empty_cnt; /* statistics */
+};
+
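+/* Illustrative sketch (hypothetical helper, not part of the driver): taking a
+ * buffer off the 'free' list during Rx refill as described above; the real
+ * refill path also attaches an skb and writes the buffer ID and DMA address
+ * into the Rx descriptor.
+ */
+static inline struct wil_rx_buff *
+wil_rx_buff_take_free(struct wil_rx_buff_mgmt *mgmt)
+{
+ struct wil_rx_buff *buff = list_first_entry_or_null(&mgmt->free,
+ struct wil_rx_buff, list);
+
+ if (!buff) {
+ mgmt->free_list_empty_cnt++;
+ return NULL;
+ }
+ list_move(&buff->list, &mgmt->active);
+ return buff;
+}
+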
+struct wil_fw_stats_global {
+ bool ready;
+ u64 tsf; /* measurement timestamp */
+ struct wmi_link_stats_global stats;
};
struct wil6210_priv {
@@ -702,6 +889,7 @@ struct wil6210_priv {
u32 bar_size;
struct wiphy *wiphy;
struct net_device *main_ndev;
+ int n_msi;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
u8 fw_version[ETHTOOL_FWVERS_LEN];
@@ -761,14 +949,20 @@ struct wil6210_priv {
struct net_device napi_ndev; /* dummy net_device serving all VIFs */
/* DMA related */
- struct vring vring_rx;
+ struct wil_ring ring_rx;
unsigned int rx_buf_len;
- struct vring vring_tx[WIL6210_MAX_TX_RINGS];
- struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
- u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
+ struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS];
+ struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS];
+ struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS];
+ u8 num_rx_status_rings;
+ int tx_sring_idx;
+ u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_sta_info sta[WIL6210_MAX_CID];
- u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */
+ u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once */
u32 dma_addr_size; /* indicates dma addr size */
+ struct wil_rx_buff_mgmt rx_buff_mgmt;
+ bool use_enhanced_dma_hw;
+ struct wil_txrx_ops txrx_ops;
struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
/* statistics */
@@ -781,6 +975,8 @@ struct wil6210_priv {
u8 wakeup_trigger;
struct wil_suspend_stats suspend_stats;
struct wil_debugfs_data dbg_data;
+ bool tx_latency; /* collect TX latency measurements */
+ size_t tx_latency_res; /* bin resolution in usec */
void *platform_handle;
struct wil_platform_ops platform_ops;
@@ -811,6 +1007,21 @@ struct wil6210_priv {
u32 rgf_fw_assert_code_addr;
u32 rgf_ucode_assert_code_addr;
u32 iccm_base;
+
+ /* relevant only for eDMA */
+ bool use_compressed_rx_status;
+ u32 rx_status_ring_order;
+ u32 tx_status_ring_order;
+ u32 rx_buff_id_count;
+ bool amsdu_en;
+ bool use_rx_hw_reordering;
+ bool secured_boot;
+ u8 boot_config;
+
+ struct wil_fw_stats_global fw_stats_global;
+
+ u32 max_agg_wsize;
+ u32 max_ampdu_size;
};
#define wil_to_wiphy(i) (i->wiphy)
@@ -894,6 +1105,8 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
wil_w(wil, reg, wil_r(wil, reg) & ~val);
}
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
+
#if defined(CONFIG_DYNAMIC_DEBUG)
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
@@ -990,7 +1203,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
int key_usage);
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie);
-int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
@@ -1010,13 +1223,14 @@ int wmi_new_sta(struct wil6210_vif *vif, const u8 *mac, u8 aid);
int wmi_port_allocate(struct wil6210_priv *wil, u8 mid,
const u8 *mac, enum nl80211_iftype iftype);
int wmi_port_delete(struct wil6210_priv *wil, u8 mid);
+int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval);
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid,
u8 cidxtid, u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
void wil6210_clear_irq(struct wil6210_priv *wil);
-int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil_mask_irq(struct wil6210_priv *wil);
void wil_unmask_irq(struct wil6210_priv *wil);
@@ -1083,30 +1297,28 @@ void wil_probe_client_flush(struct wil6210_vif *vif);
void wil_probe_client_worker(struct work_struct *work);
void wil_disconnect_worker(struct work_struct *work);
-int wil_rx_init(struct wil6210_priv *wil, u16 size);
-void wil_rx_fini(struct wil6210_priv *wil);
+void wil_init_txrx_ops(struct wil6210_priv *wil);
/* TX API */
-int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
- int cid, int tid);
-void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
-int wil_tx_init(struct wil6210_vif *vif, int cid);
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid);
int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size);
int wil_bcast_init(struct wil6210_vif *vif);
void wil_bcast_fini(struct wil6210_vif *vif);
void wil_bcast_fini_all(struct wil6210_priv *wil);
void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool should_stop);
+ struct wil_ring *ring, bool should_stop);
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool check_stop);
+ struct wil_ring *ring, bool check_stop);
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int wil_tx_complete(struct wil6210_vif *vif, int ringid);
void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
/* RX API */
void wil_rx_handle(struct wil6210_priv *wil, int *quota);
void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
@@ -1127,7 +1339,6 @@ bool wil_is_wmi_idle(struct wil6210_priv *wil);
int wmi_resume(struct wil6210_priv *wil);
int wmi_suspend(struct wil6210_priv *wil);
bool wil_is_tx_idle(struct wil6210_priv *wil);
-bool wil_is_rx_idle(struct wil6210_priv *wil);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
@@ -1141,5 +1352,22 @@ int wmi_start_sched_scan(struct wil6210_priv *wil,
struct cfg80211_sched_scan_request *request);
int wmi_stop_sched_scan(struct wil6210_priv *wil);
int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
+int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
+ u8 channel, u16 duration_ms);
+
+int reverse_memcmp(const void *cs, const void *ct, size_t count);
+
+/* WMI for enhanced DMA */
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id);
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil,
+ u16 max_rx_pl_per_desc);
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id);
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id);
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid);
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id);
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
+ u8 tid, u8 token, u16 status, bool amsdu,
+ u16 agg_wsize, u16 timeout);
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1ed330674d9b..dc33a0b4c3fa 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -36,7 +37,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
- if (!map->fw)
+ if (!map->crash_dump)
continue;
if (map->host < host_min)
@@ -85,7 +86,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
- if (!map->fw)
+ if (!map->crash_dump)
continue;
data = (void * __force)wil->csr + HOSTADDR(map->host);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 177026e5323b..bca090611477 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -29,6 +29,7 @@ enum wil_platform_event {
enum wil_platform_features {
WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+ WIL_PLATFORM_FEATURE_TRIPLE_MSI = 1,
WIL_PLATFORM_FEATURE_MAX,
};
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 5d991243cdb5..42c02a20ec97 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -89,28 +89,28 @@ MODULE_PARM_DESC(led_id,
*/
const struct fw_map sparrow_fw_mapping[] = {
/* FW code RAM 256k */
- {0x000000, 0x040000, 0x8c0000, "fw_code", true},
+ {0x000000, 0x040000, 0x8c0000, "fw_code", true, true},
/* FW data RAM 32k */
- {0x800000, 0x808000, 0x900000, "fw_data", true},
+ {0x800000, 0x808000, 0x900000, "fw_data", true, true},
/* periph data 128k */
- {0x840000, 0x860000, 0x908000, "fw_peri", true},
+ {0x840000, 0x860000, 0x908000, "fw_peri", true, true},
/* various RGF 40k */
- {0x880000, 0x88a000, 0x880000, "rgf", true},
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
- {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
- {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 512b */
- {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true},
+ {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true},
/* upper area 548k */
- {0x8c0000, 0x949000, 0x8c0000, "upper", true},
+ {0x8c0000, 0x949000, 0x8c0000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 128k */
- {0x000000, 0x020000, 0x920000, "uc_code", false},
+ {0x000000, 0x020000, 0x920000, "uc_code", false, false},
/* ucode data RAM 16k */
- {0x800000, 0x804000, 0x940000, "uc_data", false},
+ {0x800000, 0x804000, 0x940000, "uc_data", false, false},
};
/**
@@ -118,7 +118,7 @@ const struct fw_map sparrow_fw_mapping[] = {
* it is a bit larger to support extra features
*/
const struct fw_map sparrow_d0_mac_rgf_ext = {
- 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true
+ 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
};
/**
@@ -134,34 +134,89 @@ const struct fw_map sparrow_d0_mac_rgf_ext = {
*/
const struct fw_map talyn_fw_mapping[] = {
/* FW code RAM 1M */
- {0x000000, 0x100000, 0x900000, "fw_code", true},
+ {0x000000, 0x100000, 0x900000, "fw_code", true, true},
/* FW data RAM 128k */
- {0x800000, 0x820000, 0xa00000, "fw_data", true},
+ {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
/* periph. data RAM 96k */
- {0x840000, 0x858000, 0xa20000, "fw_peri", true},
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
/* various RGF 40k */
- {0x880000, 0x88a000, 0x880000, "rgf", true},
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
- {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
- {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 1344b */
- {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true},
+ {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true},
/* ext USER RGF 4k */
- {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true},
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
/* OTP 4k */
- {0x8a0000, 0x8a1000, 0x8a0000, "otp", true},
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
/* DMA EXT RGF 64k */
- {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true},
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
/* upper area 1536k */
- {0x900000, 0xa80000, 0x900000, "upper", true},
+ {0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 256k */
- {0x000000, 0x040000, 0xa38000, "uc_code", false},
+ {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
/* ucode data RAM 32k */
- {0x800000, 0x808000, 0xa78000, "uc_data", false},
+ {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
+};
+
+/**
+ * @talyn_mb_fw_mapping provides memory remapping table for Talyn-MB
+ *
+ * array size should be in sync with the declaration in the wil6210.h
+ *
+ * Talyn MB memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xc80000 4Mb BAR0
+ * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
+ * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
+ */
+const struct fw_map talyn_mb_fw_mapping[] = {
+ /* FW code RAM 768k */
+ {0x000000, 0x0c0000, 0x900000, "fw_code", true, true},
+ /* FW data RAM 128k */
+ {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
+ /* periph. data RAM 96k */
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
+ /* mac_ext_rgf 2256b */
+ {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true},
+ /* ext USER RGF 4k */
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
+ /* SEC PKA 16k */
+ {0x890000, 0x894000, 0x890000, "sec_pka", true, true},
+ /* SEC KDF RGF 3096b */
+ {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true},
+ /* SEC MAIN 2124b */
+ {0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true},
+ /* OTP 4k */
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
+ /* DMA EXT RGF 64k */
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
+ /* DUM USER RGF 528b */
+ {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
+ /* DMA OFU 296b */
+ {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
+ /* ucode debug 4k */
+ {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
+ /* upper area 1536k */
+ {0x900000, 0xa80000, 0x900000, "upper", true, true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+ */
+ /* ucode code RAM 256k */
+ {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
+ /* ucode data RAM 32k */
+ {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
};
struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
@@ -365,14 +420,16 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_DEL_STA_CMD";
case WMI_DISCONNECT_STA_CMDID:
return "WMI_DISCONNECT_STA_CMD";
- case WMI_VRING_BA_EN_CMDID:
- return "WMI_VRING_BA_EN_CMD";
- case WMI_VRING_BA_DIS_CMDID:
- return "WMI_VRING_BA_DIS_CMD";
+ case WMI_RING_BA_EN_CMDID:
+ return "WMI_RING_BA_EN_CMD";
+ case WMI_RING_BA_DIS_CMDID:
+ return "WMI_RING_BA_DIS_CMD";
case WMI_RCP_DELBA_CMDID:
return "WMI_RCP_DELBA_CMD";
case WMI_RCP_ADDBA_RESP_CMDID:
return "WMI_RCP_ADDBA_RESP_CMD";
+ case WMI_RCP_ADDBA_RESP_EDMA_CMDID:
+ return "WMI_RCP_ADDBA_RESP_EDMA_CMD";
case WMI_PS_DEV_PROFILE_CFG_CMDID:
return "WMI_PS_DEV_PROFILE_CFG_CMD";
case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
@@ -395,6 +452,22 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_START_SCHED_SCAN_CMD";
case WMI_STOP_SCHED_SCAN_CMDID:
return "WMI_STOP_SCHED_SCAN_CMD";
+ case WMI_TX_STATUS_RING_ADD_CMDID:
+ return "WMI_TX_STATUS_RING_ADD_CMD";
+ case WMI_RX_STATUS_RING_ADD_CMDID:
+ return "WMI_RX_STATUS_RING_ADD_CMD";
+ case WMI_TX_DESC_RING_ADD_CMDID:
+ return "WMI_TX_DESC_RING_ADD_CMD";
+ case WMI_RX_DESC_RING_ADD_CMDID:
+ return "WMI_RX_DESC_RING_ADD_CMD";
+ case WMI_BCAST_DESC_RING_ADD_CMDID:
+ return "WMI_BCAST_DESC_RING_ADD_CMD";
+ case WMI_CFG_DEF_RX_OFFLOAD_CMDID:
+ return "WMI_CFG_DEF_RX_OFFLOAD_CMD";
+ case WMI_LINK_STATS_CMDID:
+ return "WMI_LINK_STATS_CMD";
+ case WMI_SW_TX_REQ_EXT_CMDID:
+ return "WMI_SW_TX_REQ_EXT_CMDID";
default:
return "Untracked CMD";
}
@@ -449,8 +522,8 @@ static const char *eventid2name(u16 eventid)
return "WMI_RCP_ADDBA_REQ_EVENT";
case WMI_DELBA_EVENTID:
return "WMI_DELBA_EVENT";
- case WMI_VRING_EN_EVENTID:
- return "WMI_VRING_EN_EVENT";
+ case WMI_RING_EN_EVENTID:
+ return "WMI_RING_EN_EVENT";
case WMI_DATA_PORT_OPEN_EVENTID:
return "WMI_DATA_PORT_OPEN_EVENT";
case WMI_AOA_MEAS_EVENTID:
@@ -519,6 +592,20 @@ static const char *eventid2name(u16 eventid)
return "WMI_STOP_SCHED_SCAN_EVENT";
case WMI_SCHED_SCAN_RESULT_EVENTID:
return "WMI_SCHED_SCAN_RESULT_EVENT";
+ case WMI_TX_STATUS_RING_CFG_DONE_EVENTID:
+ return "WMI_TX_STATUS_RING_CFG_DONE_EVENT";
+ case WMI_RX_STATUS_RING_CFG_DONE_EVENTID:
+ return "WMI_RX_STATUS_RING_CFG_DONE_EVENT";
+ case WMI_TX_DESC_RING_CFG_DONE_EVENTID:
+ return "WMI_TX_DESC_RING_CFG_DONE_EVENT";
+ case WMI_RX_DESC_RING_CFG_DONE_EVENTID:
+ return "WMI_RX_DESC_RING_CFG_DONE_EVENT";
+ case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID:
+ return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT";
+ case WMI_LINK_STATS_CONFIG_DONE_EVENTID:
+ return "WMI_LINK_STATS_CONFIG_DONE_EVENT";
+ case WMI_LINK_STATS_EVENTID:
+ return "WMI_LINK_STATS_EVENT";
default:
return "Untracked EVENT";
}
@@ -906,7 +993,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
wil->sta[evt->cid].mid = vif->mid;
wil->sta[evt->cid].status = wil_sta_conn_pending;
- rc = wil_tx_init(vif, evt->cid);
+ rc = wil_ring_init_tx(vif, evt->cid);
if (rc) {
wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
evt->cid, rc);
@@ -1063,16 +1150,16 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
}
}
-static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
+static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- struct wmi_vring_en_event *evt = d;
- u8 vri = evt->vring_index;
+ struct wmi_ring_en_event *evt = d;
+ u8 vri = evt->ring_index;
struct wireless_dev *wdev = vif_to_wdev(vif);
wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
- if (vri >= ARRAY_SIZE(wil->vring_tx)) {
+ if (vri >= ARRAY_SIZE(wil->ring_tx)) {
wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
@@ -1081,8 +1168,8 @@ static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
/* in AP mode with disable_ap_sme, this is done by
* wil_cfg80211_change_station()
*/
- wil->vring_tx_data[vri].dot1x_open = true;
- if (vri == vif->bcast_vring) /* no BA for bcast */
+ wil->ring_tx_data[vri].dot1x_open = true;
+ if (vri == vif->bcast_ring) /* no BA for bcast */
return;
if (agg_wsize >= 0)
wil_addba_tx_request(wil, vri, agg_wsize);
@@ -1093,7 +1180,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_ba_status_event *evt = d;
- struct vring_tx_data *txdata;
+ struct wil_ring_tx_data *txdata;
wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
evt->ringid,
@@ -1112,7 +1199,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
evt->amsdu = 0;
}
- txdata = &wil->vring_tx_data[evt->ringid];
+ txdata = &wil->ring_tx_data[evt->ringid];
txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
txdata->agg_wsize = evt->agg_wsize;
@@ -1150,11 +1237,11 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (!evt->from_initiator) {
int i;
/* find Tx vring it belongs to */
- for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
- if ((wil->vring2cid_tid[i][0] == cid) &&
- (wil->vring2cid_tid[i][1] == tid)) {
- struct vring_tx_data *txdata =
- &wil->vring_tx_data[i];
+ for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+ if (wil->ring2cid_tid[i][0] == cid &&
+ wil->ring2cid_tid[i][1] == tid) {
+ struct wil_ring_tx_data *txdata =
+ &wil->ring_tx_data[i];
wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
txdata->agg_timeout = 0;
@@ -1164,7 +1251,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
break; /* max. 1 matching ring */
}
}
- if (i >= ARRAY_SIZE(wil->vring2cid_tid))
+ if (i >= ARRAY_SIZE(wil->ring2cid_tid))
wil_err(wil, "DELBA: unable to find Tx vring\n");
return;
}
@@ -1250,6 +1337,130 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
cfg80211_sched_scan_results(wiphy, 0);
}
+static void wil_link_stats_store_basic(struct wil6210_vif *vif,
+ struct wmi_link_stats_basic *basic)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ u8 cid = basic->cid;
+ struct wil_sta_info *sta;
+
+ if (cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "invalid cid %d\n", cid);
+ return;
+ }
+
+ sta = &wil->sta[cid];
+ sta->fw_stats_basic = *basic;
+}
+
+static void wil_link_stats_store_global(struct wil6210_vif *vif,
+ struct wmi_link_stats_global *global)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil->fw_stats_global.stats = *global;
+}
+
+static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
+ bool has_next, void *payload,
+ size_t payload_size)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ size_t hdr_size = sizeof(struct wmi_link_stats_record);
+ size_t stats_size, record_size, expected_size;
+ struct wmi_link_stats_record *hdr;
+
+ if (payload_size < hdr_size) {
+ wil_err(wil, "link stats wrong event size %zu\n", payload_size);
+ return;
+ }
+
+ while (payload_size >= hdr_size) {
+ hdr = payload;
+ stats_size = le16_to_cpu(hdr->record_size);
+ record_size = hdr_size + stats_size;
+
+ if (payload_size < record_size) {
+ wil_err(wil, "link stats payload ended unexpectedly, size %zu < %zu\n",
+ payload_size, record_size);
+ return;
+ }
+
+ switch (hdr->record_type_id) {
+ case WMI_LINK_STATS_TYPE_BASIC:
+ expected_size = sizeof(struct wmi_link_stats_basic);
+ if (stats_size < expected_size) {
+ wil_err(wil, "link stats invalid basic record size %zu < %zu\n",
+ stats_size, expected_size);
+ return;
+ }
+ if (vif->fw_stats_ready) {
+ /* clean old statistics */
+ vif->fw_stats_tsf = 0;
+ vif->fw_stats_ready = 0;
+ }
+
+ wil_link_stats_store_basic(vif, payload + hdr_size);
+
+ if (!has_next) {
+ vif->fw_stats_tsf = tsf;
+ vif->fw_stats_ready = 1;
+ }
+
+ break;
+ case WMI_LINK_STATS_TYPE_GLOBAL:
+ expected_size = sizeof(struct wmi_link_stats_global);
+ if (stats_size < expected_size) {
+ wil_err(wil, "link stats invalid global record size %zu < %zu\n",
+ stats_size, expected_size);
+ return;
+ }
+
+ if (wil->fw_stats_global.ready) {
+ /* clean old statistics */
+ wil->fw_stats_global.tsf = 0;
+ wil->fw_stats_global.ready = 0;
+ }
+
+ wil_link_stats_store_global(vif, payload + hdr_size);
+
+ if (!has_next) {
+ wil->fw_stats_global.tsf = tsf;
+ wil->fw_stats_global.ready = 1;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ /* skip to next record */
+ payload += record_size;
+ payload_size -= record_size;
+ }
+}
+
+static void
+wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_event *evt = d;
+ size_t payload_size;
+
+ if (len < offsetof(struct wmi_link_stats_event, payload)) {
+ wil_err(wil, "stats event way too short %d\n", len);
+ return;
+ }
+ payload_size = le16_to_cpu(evt->payload_size);
+ if (len < sizeof(struct wmi_link_stats_event) + payload_size) {
+ wil_err(wil, "stats event too short %d\n", len);
+ return;
+ }
+
+ wmi_link_stats_parse(vif, le64_to_cpu(evt->tsf), evt->has_next,
+ evt->payload, payload_size);
+}
+
/**
* Some events are ignored for purpose; and need not be interpreted as
* "unhandled events"
@@ -1277,9 +1488,10 @@ static const struct {
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
- {WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
+ {WMI_RING_EN_EVENTID, wmi_evt_ring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
{WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
+ {WMI_LINK_STATS_EVENTID, wmi_evt_link_stats},
};
/*
@@ -1909,7 +2121,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
return rc;
}
-int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
@@ -2063,29 +2275,32 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
int wmi_addba(struct wil6210_priv *wil, u8 mid,
u8 ringid, u8 size, u16 timeout)
{
- struct wmi_vring_ba_en_cmd cmd = {
- .ringid = ringid,
+ u8 amsdu = wil->use_enhanced_dma_hw && wil->use_rx_hw_reordering &&
+ test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+ wil->amsdu_en;
+ struct wmi_ring_ba_en_cmd cmd = {
+ .ring_id = ringid,
.agg_max_wsize = size,
.ba_timeout = cpu_to_le16(timeout),
- .amsdu = 0,
+ .amsdu = amsdu,
};
- wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size,
- timeout);
+ wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d amsdu %d)\n",
+ ringid, size, timeout, amsdu);
- return wmi_send(wil, WMI_VRING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
+ return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
{
- struct wmi_vring_ba_dis_cmd cmd = {
- .ringid = ringid,
+ struct wmi_ring_ba_dis_cmd cmd = {
+ .ring_id = ringid,
.reason = cpu_to_le16(reason),
};
wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
- return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
+ return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason)
@@ -2146,6 +2361,54 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
return rc;
}
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
+ u8 token, u16 status, bool amsdu, u16 agg_wsize,
+ u16 timeout)
+{
+ int rc;
+ struct wmi_rcp_addba_resp_edma_cmd cmd = {
+ .cid = cid,
+ .tid = tid,
+ .dialog_token = token,
+ .status_code = cpu_to_le16(status),
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (should be 0 for us)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
+ (agg_wsize << 6)),
+ .ba_timeout = cpu_to_le16(timeout),
+ /* route all the connections to status ring 0 */
+ .status_ring_id = WIL_DEFAULT_RX_STATUS_RING_ID,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_rcp_addba_resp_sent_event evt;
+ } __packed reply = {
+ .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
+ };
+
+ wil_dbg_wmi(wil,
+ "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s, sring_id %d\n",
+ cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-",
+ WIL_DEFAULT_RX_STATUS_RING_ID);
+
+ rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_EDMA_CMDID, mid, &cmd,
+ sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status) {
+ wil_err(wil, "ADDBA response failed with status %d\n",
+ le16_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
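The ba_param_set packing in wmi_addba_rx_resp_edma() above follows the Block Ack Parameter Set layout spelled out in the comment. A tiny stand-alone check with made-up inputs (A-MSDU on, TID 3, window size 64):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int amsdu = 1, tid = 3, agg_wsize = 64;	/* example inputs only */
	/* bit 0: A-MSDU, bit 1: policy (0), bits 2..5: TID, bits 6..15: size */
	uint16_t ba_param_set = (amsdu ? 1 : 0) | (tid << 2) | (agg_wsize << 6);

	/* 1 + (3 << 2) + (64 << 6) = 1 + 12 + 4096 = 0x100d */
	printf("ba_param_set = 0x%04x\n", ba_param_set);
	return 0;
}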
int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
enum wmi_ps_profile_type ps_profile)
{
@@ -2852,3 +3115,351 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
return rc;
}
+
+int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
+ u8 channel, u16 duration_ms)
+{
+ size_t total;
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct ieee80211_mgmt *mgmt_frame = (void *)buf;
+ struct wmi_sw_tx_req_ext_cmd *cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_sw_tx_complete_event evt;
+ } __packed evt = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ int rc;
+
+ wil_dbg_wmi(wil, "mgmt_tx_ext mid %d channel %d duration %d\n",
+ vif->mid, channel, duration_ms);
+ wil_hex_dump_wmi("mgmt_tx_ext frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+
+ if (len < sizeof(struct ieee80211_hdr_3addr)) {
+ wil_err(wil, "short frame. len %zu\n", len);
+ return -EINVAL;
+ }
+
+ total = sizeof(*cmd) + len;
+ if (total < len) {
+ wil_err(wil, "mgmt_tx_ext invalid len %zu\n", len);
+ return -EINVAL;
+ }
+
+ cmd = kzalloc(total, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
+ cmd->len = cpu_to_le16(len);
+ memcpy(cmd->payload, buf, len);
+ cmd->channel = channel - 1;
+ cmd->duration_ms = cpu_to_le16(duration_ms);
+
+ rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "mgmt_tx_ext failed with status %d\n",
+ evt.evt.status);
+ rc = -EINVAL;
+ }
+
+ kfree(cmd);
+
+ return rc;
+}
+
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id)
+{
+ int rc;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ struct wmi_tx_status_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(sring->size),
+ },
+ .irq_index = WIL_TX_STATUS_IRQ_IDX
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_tx_status_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_id = ring_id;
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+ rc = wmi_call(wil, WMI_TX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_STATUS_RING_CFG_DONE_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, u16 max_rx_pl_per_desc)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ struct wmi_cfg_def_rx_offload_cmd cmd = {
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(WIL_MAX_ETH_MTU)),
+ .max_rx_pl_per_desc = cpu_to_le16(max_rx_pl_per_desc),
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ .l2_802_3_offload_ctrl = 0,
+ .l3_l4_ctrl = 1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_cfg_def_rx_offload_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ rc = wmi_call(wil, WMI_CFG_DEF_RX_OFFLOAD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ int rc;
+ struct wmi_rx_status_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(sring->size),
+ .ring_id = ring_id,
+ },
+ .rx_msg_type = wil->use_compressed_rx_status ?
+ WMI_RX_MSG_TYPE_COMPRESSED :
+ WMI_RX_MSG_TYPE_EXTENDED,
+ .irq_index = WIL_RX_STATUS_IRQ_IDX,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_status_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+ rc = wmi_call(wil, WMI_RX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_RX_STATUS_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+ struct wmi_rx_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = WIL_RX_DESC_RING_ID,
+ },
+ .status_ring_id = status_ring_id,
+ .irq_index = WIL_RX_STATUS_IRQ_IDX,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa);
+ rc = wmi_call(wil, WMI_RX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_RX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int sring_id = wil->tx_sring_idx; /* there is only one TX sring */
+ int rc;
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+ struct wmi_tx_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = ring_id,
+ },
+ .status_ring_id = sring_id,
+ .cid = cid,
+ .tid = tid,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ }
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_tx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ rc = wmi_call(wil, WMI_TX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ return 0;
+}
+
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ int rc;
+ struct wmi_bcast_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = ring_id,
+ },
+ .status_ring_id = wil->tx_sring_idx,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ rc = wmi_call(wil, WMI_BCAST_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_BCAST_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Broadcast Tx config failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ return 0;
+}
+
+int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_cmd cmd = {
+ .record_type_mask = cpu_to_le32(type),
+ .cid = cid,
+ .action = WMI_LINK_STATS_SNAPSHOT,
+ .interval_msec = cpu_to_le32(interval),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_link_stats_config_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ int rc;
+
+ rc = wmi_call(wil, WMI_LINK_STATS_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_LINK_STATS_CONFIG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_LINK_STATS_CMDID failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Link statistics config failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
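Taken together, the wil_wmi_* helpers added above let the driver configure the whole enhanced-DMA (EDMA) data path over WMI: one TX status ring, RX status ring(s), the RX descriptor ring, default RX offload parameters, and per-connection/broadcast TX descriptor rings. The helper below is a hypothetical sketch of one plausible bring-up order, not the driver's actual init path (that lives in the Talyn txrx code, outside this hunk); the ordering, the ring-id choices and the max_rx_pl_per_desc parameter are assumptions.

/* Hypothetical helper -- NOT part of this patch. It only illustrates how
 * the WMI calls added above could be composed; the real sequence and
 * parameter values may differ.
 */
static int wil_edma_bringup_sketch(struct wil6210_priv *wil,
				   u16 max_rx_pl_per_desc)
{
	int rc;

	/* the single TX status ring, assumed already allocated in
	 * wil->srings[wil->tx_sring_idx]
	 */
	rc = wil_wmi_tx_sring_cfg(wil, wil->tx_sring_idx);
	if (rc)
		return rc;

	/* tell FW how RX frames are decapsulated / checksum-offloaded */
	rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
	if (rc)
		return rc;

	/* RX status ring 0, then the RX descriptor ring that feeds it */
	rc = wil_wmi_rx_sring_add(wil, WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		return rc;

	rc = wil_wmi_rx_desc_ring_add(wil, WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		return rc;

	/* per-connection and broadcast TX rings are added later, from the
	 * connect path, via wil_wmi_tx_desc_ring_add() and
	 * wil_wmi_bcast_desc_ring_add()
	 */
	return 0;
}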
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index dc503d903786..139acb2caf92 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -53,6 +53,17 @@
* must always be kept equal to (WMI_RF_RX2TX_LENGTH+1)
*/
#define WMI_RF_RX2TX_CONF_LENGTH (4)
+/* Qos configuration */
+#define WMI_QOS_NUM_OF_PRIORITY (4)
+#define WMI_QOS_MIN_DEFAULT_WEIGHT (10)
+#define WMI_QOS_VRING_SLOT_MIN_MS (2)
+#define WMI_QOS_VRING_SLOT_MAX_MS (10)
+/* (WMI_QOS_MIN_DEFAULT_WEIGHT * WMI_QOS_VRING_SLOT_MAX_MS /
+ * WMI_QOS_VRING_SLOT_MIN_MS)
+ */
+#define WMI_QOS_MAX_WEIGHT 50
+#define WMI_QOS_SET_VIF_PRIORITY (0xFF)
+#define WMI_QOS_DEFAULT_PRIORITY (WMI_QOS_NUM_OF_PRIORITY)
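A quick check on the derived constant: WMI_QOS_MIN_DEFAULT_WEIGHT * WMI_QOS_VRING_SLOT_MAX_MS / WMI_QOS_VRING_SLOT_MIN_MS = 10 * 10 / 2 = 50, which matches the hard-coded WMI_QOS_MAX_WEIGHT above.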
/* Mailbox interface
* used for commands and events
@@ -86,6 +97,12 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_PNO = 15,
WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19,
+ WMI_FW_CAPABILITY_MULTI_VIFS = 20,
+ WMI_FW_CAPABILITY_FT_ROAMING = 21,
+ WMI_FW_CAPABILITY_BACK_WIN_SIZE_64 = 22,
+ WMI_FW_CAPABILITY_AMSDU = 23,
+ WMI_FW_CAPABILITY_RAW_MODE = 24,
+ WMI_FW_CAPABILITY_TX_REQ_EXT = 25,
WMI_FW_CAPABILITY_MAX,
};
@@ -109,6 +126,9 @@ enum wmi_command_id {
WMI_SET_PROBED_SSID_CMDID = 0x0A,
/* deprecated */
WMI_SET_LISTEN_INT_CMDID = 0x0B,
+ WMI_FT_AUTH_CMDID = 0x0C,
+ WMI_FT_REASSOC_CMDID = 0x0D,
+ WMI_UPDATE_FT_IES_CMDID = 0x0E,
WMI_BCON_CTRL_CMDID = 0x0F,
WMI_ADD_CIPHER_KEY_CMDID = 0x16,
WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
@@ -117,6 +137,12 @@ enum wmi_command_id {
WMI_SET_WSC_STATUS_CMDID = 0x41,
WMI_PXMT_RANGE_CFG_CMDID = 0x42,
WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
+ WMI_RADAR_GENERAL_CONFIG_CMDID = 0x100,
+ WMI_RADAR_CONFIG_SELECT_CMDID = 0x101,
+ WMI_RADAR_PARAMS_CONFIG_CMDID = 0x102,
+ WMI_RADAR_SET_MODE_CMDID = 0x103,
+ WMI_RADAR_CONTROL_CMDID = 0x104,
+ WMI_RADAR_PCI_CONTROL_CMDID = 0x105,
WMI_MEM_READ_CMDID = 0x800,
WMI_MEM_WR_CMDID = 0x801,
WMI_ECHO_CMDID = 0x803,
@@ -148,8 +174,8 @@ enum wmi_command_id {
WMI_CFG_RX_CHAIN_CMDID = 0x820,
WMI_VRING_CFG_CMDID = 0x821,
WMI_BCAST_VRING_CFG_CMDID = 0x822,
- WMI_VRING_BA_EN_CMDID = 0x823,
- WMI_VRING_BA_DIS_CMDID = 0x824,
+ WMI_RING_BA_EN_CMDID = 0x823,
+ WMI_RING_BA_DIS_CMDID = 0x824,
WMI_RCP_ADDBA_RESP_CMDID = 0x825,
WMI_RCP_DELBA_CMDID = 0x826,
WMI_SET_SSID_CMDID = 0x827,
@@ -157,12 +183,17 @@ enum wmi_command_id {
WMI_SET_PCP_CHANNEL_CMDID = 0x829,
WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
WMI_SW_TX_REQ_CMDID = 0x82B,
+ /* Event is shared between WMI_SW_TX_REQ_CMDID and
+ * WMI_SW_TX_REQ_EXT_CMDID
+ */
+ WMI_SW_TX_REQ_EXT_CMDID = 0x82C,
WMI_MLME_PUSH_CMDID = 0x835,
WMI_BEAMFORMING_MGMT_CMDID = 0x836,
WMI_BF_TXSS_MGMT_CMDID = 0x837,
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
@@ -205,7 +236,12 @@ enum wmi_command_id {
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
/* Power Save Configuration Commands */
WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
+ WMI_RS_ENABLE_CMDID = 0x91E,
+ WMI_RS_CFG_EX_CMDID = 0x91F,
+ WMI_GET_DETAILED_RS_RES_EX_CMDID = 0x920,
+ /* deprecated */
WMI_RS_CFG_CMDID = 0x921,
+ /* deprecated */
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
WMI_AOA_MEAS_CMDID = 0x923,
WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924,
@@ -234,7 +270,15 @@ enum wmi_command_id {
WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5,
WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7,
+ /* deprecated */
WMI_BF_CONTROL_CMDID = 0x9AA,
+ WMI_BF_CONTROL_EX_CMDID = 0x9AB,
+ WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0,
+ WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1,
+ WMI_TX_DESC_RING_ADD_CMDID = 0x9C2,
+ WMI_RX_DESC_RING_ADD_CMDID = 0x9C3,
+ WMI_BCAST_DESC_RING_ADD_CMDID = 0x9C4,
+ WMI_CFG_DEF_RX_OFFLOAD_CMDID = 0x9C5,
WMI_SCHEDULING_SCHEME_CMDID = 0xA01,
WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02,
WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03,
@@ -244,6 +288,11 @@ enum wmi_command_id {
WMI_GET_CCA_INDICATIONS_CMDID = 0xA07,
WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID = 0xA08,
WMI_INTERNAL_FW_IOCTL_CMDID = 0xA0B,
+ WMI_LINK_STATS_CMDID = 0xA0C,
+ WMI_SET_GRANT_MCS_CMDID = 0xA0E,
+ WMI_SET_AP_SLOT_SIZE_CMDID = 0xA0F,
+ WMI_SET_VRING_PRIORITY_WEIGHT_CMDID = 0xA10,
+ WMI_SET_VRING_PRIORITY_CMDID = 0xA11,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@@ -442,6 +491,30 @@ struct wmi_start_sched_scan_cmd {
struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
} __packed;
+/* WMI_FT_AUTH_CMDID */
+struct wmi_ft_auth_cmd {
+ u8 bssid[WMI_MAC_LEN];
+ /* enum wmi_channel */
+ u8 channel;
+ /* enum wmi_channel */
+ u8 edmg_channel;
+ u8 reserved[4];
+} __packed;
+
+/* WMI_FT_REASSOC_CMDID */
+struct wmi_ft_reassoc_cmd {
+ u8 bssid[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+/* WMI_UPDATE_FT_IES_CMDID */
+struct wmi_update_ft_ies_cmd {
+ /* Length of the FT IEs */
+ __le16 ie_len;
+ u8 reserved[2];
+ u8 ie_info[0];
+} __packed;
+
/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX (3)
@@ -498,6 +571,109 @@ struct wmi_pxmt_snr2_range_cfg_cmd {
s8 snr2range_arr[2];
} __packed;
+/* WMI_RADAR_GENERAL_CONFIG_CMDID */
+struct wmi_radar_general_config_cmd {
+ /* Number of pulses (CIRs) in FW FIFO to initiate pulses transfer
+ * from FW to Host
+ */
+ __le32 fifo_watermark;
+ /* In unit of us, in the range [100, 1000000] */
+ __le32 t_burst;
+ /* Valid in the range [1, 32768], 0xFFFF means infinite */
+ __le32 n_bursts;
+ /* In unit of 330MHz clk, in the range [4, 2000]*330 */
+ __le32 t_pulse;
+ /* In the range of [1,4096] */
+ __le16 n_pulses;
+ /* Number of taps after cTap per CIR */
+ __le16 n_samples;
+ /* Offset from the main tap (0 = zero-distance). In the range of [0,
+ * 255]
+ */
+ u8 first_sample_offset;
+ /* Number of Pulses to average, 1, 2, 4, 8 */
+ u8 pulses_to_avg;
+ /* Number of adjacent taps to average, 1, 2, 4, 8 */
+ u8 samples_to_avg;
+ /* The index to config general params */
+ u8 general_index;
+ u8 reserved[4];
+} __packed;
+
+/* WMI_RADAR_CONFIG_SELECT_CMDID */
+struct wmi_radar_config_select_cmd {
+ /* Select the general params index to use */
+ u8 general_index;
+ u8 reserved[3];
+ /* 0 means don't update burst_active_vector */
+ __le32 burst_active_vector;
+ /* 0 means don't update pulse_active_vector */
+ __le32 pulse_active_vector;
+} __packed;
+
+/* WMI_RADAR_PARAMS_CONFIG_CMDID */
+struct wmi_radar_params_config_cmd {
+ /* The burst index selected to config */
+ u8 burst_index;
+ /* 0-not active, 1-active */
+ u8 burst_en;
+ /* The pulse index selected to config */
+ u8 pulse_index;
+ /* 0-not active, 1-active */
+ u8 pulse_en;
+ /* TX RF to use on current pulse */
+ u8 tx_rfc_idx;
+ u8 tx_sector;
+ /* Offset from the calibrated value (expected to be 0); the value is
+ * a row in the Gain-LUT, not dB
+ */
+ s8 tx_rf_gain_comp;
+ /* expected to be 0 */
+ s8 tx_bb_gain_comp;
+ /* RX RF to use on current pulse */
+ u8 rx_rfc_idx;
+ u8 rx_sector;
+ /* Offset from the calibrated value (expected to be 0); the value is
+ * a row in the Gain-LUT, not dB
+ */
+ s8 rx_rf_gain_comp;
+ /* Value in dB (expected to be 0) */
+ s8 rx_bb_gain_comp;
+ /* Offset from the calibrated value (expected to be 0) */
+ s8 rx_timing_offset;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_SET_MODE_CMDID */
+struct wmi_radar_set_mode_cmd {
+ /* 0-disable/1-enable */
+ u8 enable;
+ /* enum wmi_channel */
+ u8 channel;
+ /* In the range of [0,7], 0xff means use default */
+ u8 tx_rfc_idx;
+ /* In the range of [0,7], 0xff means use default */
+ u8 rx_rfc_idx;
+} __packed;
+
+/* WMI_RADAR_CONTROL_CMDID */
+struct wmi_radar_control_cmd {
+ /* 0-stop/1-start */
+ u8 start;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_PCI_CONTROL_CMDID */
+struct wmi_radar_pci_control_cmd {
+ /* pcie host buffer start address */
+ __le64 base_addr;
+ /* pcie host control block address */
+ __le64 control_block_addr;
+ /* pcie host buffer size */
+ __le32 buffer_size;
+ __le32 reserved;
+} __packed;
+
/* WMI_RF_MGMT_CMDID */
enum wmi_rf_mgmt_type {
WMI_RF_MGMT_W_DISABLE = 0x00,
@@ -635,12 +811,18 @@ struct wmi_pcp_start_cmd {
u8 pcp_max_assoc_sta;
u8 hidden_ssid;
u8 is_go;
- u8 reserved0[5];
+ /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */
+ u8 edmg_channel;
+ u8 raw_mode;
+ u8 reserved[3];
/* A-BFT length override if non-0 */
u8 abft_len;
/* enum wmi_ap_sme_offload_mode_e */
u8 ap_sme_offload_mode;
u8 network_type;
+ /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is
+ * the primary channel number
+ */
u8 channel;
u8 disable_sec_offload;
u8 disable_sec;
@@ -653,6 +835,17 @@ struct wmi_sw_tx_req_cmd {
u8 payload[0];
} __packed;
+/* WMI_SW_TX_REQ_EXT_CMDID */
+struct wmi_sw_tx_req_ext_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ __le16 duration_ms;
+ /* Channel to use, 0xFF for currently active channel */
+ u8 channel;
+ u8 reserved[5];
+ u8 payload[0];
+} __packed;
+
/* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */
struct wmi_vring_switch_timing_config_cmd {
/* Set vring timing configuration:
@@ -679,6 +872,7 @@ struct wmi_vring_cfg_schd {
enum wmi_vring_cfg_encap_trans_type {
WMI_VRING_ENC_TYPE_802_3 = 0x00,
WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01,
+ WMI_VRING_ENC_TYPE_NONE = 0x02,
};
enum wmi_vring_cfg_ds_cfg {
@@ -736,7 +930,11 @@ struct wmi_vring_cfg {
u8 cid;
/* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
u8 tid;
- u8 reserved[2];
+ /* Update the vring's priority for Qos purpose. Set to
+ * WMI_QOS_DEFAULT_PRIORITY to use MID's QoS priority
+ */
+ u8 qos_priority;
+ u8 reserved;
} __packed;
enum wmi_vring_cfg_cmd_action {
@@ -767,6 +965,78 @@ struct wmi_bcast_vring_cfg_cmd {
struct wmi_bcast_vring_cfg vring_cfg;
} __packed;
+struct wmi_edma_ring_cfg {
+ __le64 ring_mem_base;
+ /* size in number of items */
+ __le16 ring_size;
+ u8 ring_id;
+ u8 reserved;
+} __packed;
+
+enum wmi_rx_msg_type {
+ WMI_RX_MSG_TYPE_COMPRESSED = 0x00,
+ WMI_RX_MSG_TYPE_EXTENDED = 0x01,
+};
+
+struct wmi_tx_status_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_rx_status_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ /* wmi_rx_msg_type */
+ u8 rx_msg_type;
+ u8 reserved[2];
+} __packed;
+
+struct wmi_cfg_def_rx_offload_cmd {
+ __le16 max_msdu_size;
+ __le16 max_rx_pl_per_desc;
+ u8 decap_trans_type;
+ u8 l2_802_3_offload_ctrl;
+ u8 l2_nwifi_offload_ctrl;
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+ u8 l3_l4_ctrl;
+ u8 reserved[6];
+} __packed;
+
+struct wmi_tx_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ __le16 max_msdu_size;
+ /* Correlated status ring (0-63) */
+ u8 status_ring_id;
+ u8 cid;
+ u8 tid;
+ u8 encap_trans_type;
+ u8 mac_ctrl;
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ u8 reserved[3];
+ struct wmi_vring_cfg_schd schd_params;
+} __packed;
+
+struct wmi_rx_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ /* 0-63 status rings */
+ u8 status_ring_id;
+ u8 reserved[2];
+ __le64 sw_tail_host_addr;
+} __packed;
+
+struct wmi_bcast_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ __le16 max_msdu_size;
+ /* Correlated status ring (0-63) */
+ u8 status_ring_id;
+ u8 encap_trans_type;
+ u8 reserved[4];
+} __packed;
+
/* WMI_LO_POWER_CALIB_FROM_OTP_CMDID */
struct wmi_lo_power_calib_from_otp_cmd {
/* index to read from OTP. zero based */
@@ -781,18 +1051,18 @@ struct wmi_lo_power_calib_from_otp_event {
u8 reserved[3];
} __packed;
-/* WMI_VRING_BA_EN_CMDID */
-struct wmi_vring_ba_en_cmd {
- u8 ringid;
+/* WMI_RING_BA_EN_CMDID */
+struct wmi_ring_ba_en_cmd {
+ u8 ring_id;
u8 agg_max_wsize;
__le16 ba_timeout;
u8 amsdu;
u8 reserved[3];
} __packed;
-/* WMI_VRING_BA_DIS_CMDID */
-struct wmi_vring_ba_dis_cmd {
- u8 ringid;
+/* WMI_RING_BA_DIS_CMDID */
+struct wmi_ring_ba_dis_cmd {
+ u8 ring_id;
u8 reserved;
__le16 reason;
} __packed;
@@ -950,6 +1220,21 @@ struct wmi_rcp_addba_resp_cmd {
u8 reserved[2];
} __packed;
+/* WMI_RCP_ADDBA_RESP_EDMA_CMDID */
+struct wmi_rcp_addba_resp_edma_cmd {
+ u8 cid;
+ u8 tid;
+ u8 dialog_token;
+ u8 reserved;
+ __le16 status_code;
+ /* ieee80211_ba_parameterset field to send */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ u8 status_ring_id;
+ /* wmi_cfg_rx_chain_cmd_reorder_type */
+ u8 reorder_type;
+} __packed;
+
/* WMI_RCP_DELBA_CMDID */
struct wmi_rcp_delba_cmd {
/* Used for cid less than 8. For higher cid set
@@ -999,8 +1284,8 @@ struct wmi_echo_cmd {
} __packed;
/* WMI_DEEP_ECHO_CMDID
- * Check FW and ucode are alive
- * Returned event: WMI_ECHO_RSP_EVENTID
+ * Check FW and uCode are alive
+ * Returned event: WMI_DEEP_ECHO_RSP_EVENTID
*/
struct wmi_deep_echo_cmd {
__le32 value;
@@ -1324,6 +1609,10 @@ struct wmi_fixed_scheduling_config_complete_event {
u8 reserved[3];
} __packed;
+/* This value exists for backwards compatibility only.
+ * Do not use it in new commands.
+ * Use dynamic arrays where possible.
+ */
#define WMI_NUM_MCS (13)
/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */
@@ -1371,6 +1660,52 @@ struct wmi_set_multi_directed_omnis_config_event {
u8 reserved[3];
} __packed;
+/* WMI_RADAR_GENERAL_CONFIG_EVENTID */
+struct wmi_radar_general_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_CONFIG_SELECT_EVENTID */
+struct wmi_radar_config_select_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+ /* In unit of bytes */
+ __le32 fifo_size;
+ /* In unit of bytes */
+ __le32 pulse_size;
+} __packed;
+
+/* WMI_RADAR_PARAMS_CONFIG_EVENTID */
+struct wmi_radar_params_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_SET_MODE_EVENTID */
+struct wmi_radar_set_mode_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_CONTROL_EVENTID */
+struct wmi_radar_control_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_PCI_CONTROL_EVENTID */
+struct wmi_radar_pci_control_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* WMI_SET_LONG_RANGE_CONFIG_CMDID */
struct wmi_set_long_range_config_cmd {
__le32 reserved;
@@ -1383,12 +1718,12 @@ struct wmi_set_long_range_config_complete_event {
u8 reserved[3];
} __packed;
-/* payload max size is 236 bytes: max event buffer size (256) - WMI headers
+/* payload max size is 1024 bytes: max event buffer size (1044) - WMI headers
* (16) - prev struct field size (4)
*/
-#define WMI_MAX_IOCTL_PAYLOAD_SIZE (236)
-#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (236)
-#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (236)
+#define WMI_MAX_IOCTL_PAYLOAD_SIZE (1024)
+#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (1024)
+#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (1024)
enum wmi_internal_fw_ioctl_code {
WMI_INTERNAL_FW_CODE_NONE = 0x0,
@@ -1428,7 +1763,37 @@ struct wmi_internal_fw_event_event {
__le32 payload[0];
} __packed;
-/* WMI_BF_CONTROL_CMDID */
+/* WMI_SET_VRING_PRIORITY_WEIGHT_CMDID */
+struct wmi_set_vring_priority_weight_cmd {
+ /* Array of weights. Valid values are
+ * WMI_QOS_MIN_DEFAULT_WEIGHT...WMI_QOS_MAX_WEIGHT. Weight #0 is
+ * hard-coded WMI_QOS_MIN_WEIGHT. This array provides the weights
+ * #1..#3
+ */
+ u8 weight[3];
+ u8 reserved;
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_CMDID */
+struct wmi_vring_priority {
+ u8 vring_idx;
+ /* Weight index. Valid value is 0-3 */
+ u8 priority;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_CMDID */
+struct wmi_set_vring_priority_cmd {
+ /* number of entries in vring_priority. Set to
+ * WMI_QOS_SET_VIF_PRIORITY to update the VIF's priority, and there
+ * will be only one entry in vring_priority
+ */
+ u8 num_of_vrings;
+ u8 reserved[3];
+ struct wmi_vring_priority vring_priority[0];
+} __packed;
+
+/* WMI_BF_CONTROL_CMDID - deprecated */
struct wmi_bf_control_cmd {
/* wmi_bf_triggers */
__le32 triggers;
@@ -1470,6 +1835,95 @@ struct wmi_bf_control_cmd {
u8 reserved2[2];
} __packed;
+/* BF configuration for each MCS */
+struct wmi_bf_control_ex_mcs {
+ /* Long term throughput threshold [Mbps] */
+ u8 long_term_mbps_th_tbl;
+ u8 reserved;
+ /* Long term timeout threshold table [msec] */
+ __le16 long_term_trig_timeout_per_mcs;
+} __packed;
+
+/* WMI_BF_CONTROL_EX_CMDID */
+struct wmi_bf_control_ex_cmd {
+ /* wmi_bf_triggers */
+ __le32 triggers;
+ /* enum wmi_edmg_tx_mode */
+ u8 tx_mode;
+ /* DISABLED = 0, ENABLED = 1 , DRY_RUN = 2 */
+ u8 txss_mode;
+ /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */
+ u8 brp_mode;
+ /* Max cts threshold (correspond to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_thr;
+ /* Max cts threshold in dense (correspond to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_dense_thr;
+ /* Max b-ack threshold (correspond to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_thr;
+ /* Max b-ack threshold in dense (correspond to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_dense_thr;
+ u8 reserved0;
+ /* Wrong sectors threshold */
+ __le32 wrong_sector_bis_thr;
+ /* BOOL to enable/disable long term trigger */
+ u8 long_term_enable;
+ /* 1 = Update long term thresholds from the long_term_mbps_th_tbl and
+ * long_term_trig_timeout_per_mcs arrays, 0 = Ignore
+ */
+ u8 long_term_update_thr;
+ u8 each_mcs_cfg_size;
+ u8 reserved1;
+ /* Configuration for each MCS */
+ struct wmi_bf_control_ex_mcs each_mcs_cfg[0];
+} __packed;
+
+/* WMI_LINK_STATS_CMD */
+enum wmi_link_stats_action {
+ WMI_LINK_STATS_SNAPSHOT = 0x00,
+ WMI_LINK_STATS_PERIODIC = 0x01,
+ WMI_LINK_STATS_STOP_PERIODIC = 0x02,
+};
+
+/* WMI_LINK_STATS_EVENT record identifiers */
+enum wmi_link_stats_record_type {
+ WMI_LINK_STATS_TYPE_BASIC = 0x01,
+ WMI_LINK_STATS_TYPE_GLOBAL = 0x02,
+};
+
+/* WMI_LINK_STATS_CMDID */
+struct wmi_link_stats_cmd {
+ /* bitmask of required record types
+ * (wmi_link_stats_record_type_e)
+ */
+ __le32 record_type_mask;
+ /* 0xff for all cids */
+ u8 cid;
+ /* wmi_link_stats_action_e */
+ u8 action;
+ u8 reserved[6];
+ /* range = 100 - 10000 */
+ __le32 interval_msec;
+} __packed;
+
+/* WMI_SET_GRANT_MCS_CMDID */
+struct wmi_set_grant_mcs_cmd {
+ u8 mcs;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_AP_SLOT_SIZE_CMDID */
+struct wmi_set_ap_slot_size_cmd {
+ __le32 slot_size;
+} __packed;
+
/* WMI Events
* List of Events (target to host)
*/
@@ -1482,10 +1936,19 @@ enum wmi_event_id {
WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007,
WMI_SCAN_COMPLETE_EVENTID = 0x100A,
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
+ WMI_FT_AUTH_STATUS_EVENTID = 0x100C,
+ WMI_FT_REASSOC_STATUS_EVENTID = 0x100D,
+ WMI_RADAR_GENERAL_CONFIG_EVENTID = 0x1100,
+ WMI_RADAR_CONFIG_SELECT_EVENTID = 0x1101,
+ WMI_RADAR_PARAMS_CONFIG_EVENTID = 0x1102,
+ WMI_RADAR_SET_MODE_EVENTID = 0x1103,
+ WMI_RADAR_CONTROL_EVENTID = 0x1104,
+ WMI_RADAR_PCI_CONTROL_EVENTID = 0x1105,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
WMI_FW_READY_EVENTID = 0x1801,
WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_DEEP_ECHO_RSP_EVENTID = 0x1804,
/* deprecated */
WMI_FS_TUNE_DONE_EVENTID = 0x180A,
/* deprecated */
@@ -1511,6 +1974,9 @@ enum wmi_event_id {
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
+ /* Event is shared between WMI_SW_TX_REQ_CMDID and
+ * WMI_SW_TX_REQ_EXT_CMDID
+ */
WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
@@ -1535,7 +2001,7 @@ enum wmi_event_id {
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
- WMI_VRING_EN_EVENTID = 0x1865,
+ WMI_RING_EN_EVENTID = 0x1865,
WMI_GET_RF_STATUS_EVENTID = 0x1866,
WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868,
@@ -1558,7 +2024,12 @@ enum wmi_event_id {
WMI_PCP_FACTOR_EVENTID = 0x191A,
/* Power Save Configuration Events */
WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C,
+ WMI_RS_ENABLE_EVENTID = 0x191E,
+ WMI_RS_CFG_EX_EVENTID = 0x191F,
+ WMI_GET_DETAILED_RS_RES_EX_EVENTID = 0x1920,
+ /* deprecated */
WMI_RS_CFG_DONE_EVENTID = 0x1921,
+ /* deprecated */
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
WMI_AOA_MEAS_EVENTID = 0x1923,
WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924,
@@ -1586,7 +2057,14 @@ enum wmi_event_id {
WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5,
WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7,
+ /* deprecated */
WMI_BF_CONTROL_EVENTID = 0x19AA,
+ WMI_BF_CONTROL_EX_EVENTID = 0x19AB,
+ WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0,
+ WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1,
+ WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2,
+ WMI_RX_DESC_RING_CFG_DONE_EVENTID = 0x19C3,
+ WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID = 0x19C5,
WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01,
WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02,
WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03,
@@ -1597,6 +2075,12 @@ enum wmi_event_id {
WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID = 0x1A08,
WMI_INTERNAL_FW_EVENT_EVENTID = 0x1A0A,
WMI_INTERNAL_FW_IOCTL_EVENTID = 0x1A0B,
+ WMI_LINK_STATS_CONFIG_DONE_EVENTID = 0x1A0C,
+ WMI_LINK_STATS_EVENTID = 0x1A0D,
+ WMI_SET_GRANT_MCS_EVENTID = 0x1A0E,
+ WMI_SET_AP_SLOT_SIZE_EVENTID = 0x1A0F,
+ WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID = 0x1A10,
+ WMI_SET_VRING_PRIORITY_EVENTID = 0x1A11,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -1861,6 +2345,33 @@ struct wmi_scan_complete_event {
__le32 status;
} __packed;
+/* WMI_FT_AUTH_STATUS_EVENTID */
+struct wmi_ft_auth_status_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+ u8 mac_addr[WMI_MAC_LEN];
+ __le16 ie_len;
+ u8 ie_info[0];
+} __packed;
+
+/* WMI_FT_REASSOC_STATUS_EVENTID */
+struct wmi_ft_reassoc_status_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ /* association id received from new AP */
+ u8 aid;
+ /* enum wmi_channel */
+ u8 channel;
+ /* enum wmi_channel */
+ u8 edmg_channel;
+ u8 mac_addr[WMI_MAC_LEN];
+ __le16 beacon_ie_len;
+ __le16 reassoc_req_ie_len;
+ __le16 reassoc_resp_ie_len;
+ u8 ie_info[0];
+} __packed;
+
/* wmi_rx_mgmt_info */
struct wmi_rx_mgmt_info {
u8 mcs;
@@ -1997,6 +2508,49 @@ struct wmi_rcp_addba_resp_sent_event {
u8 reserved2[2];
} __packed;
+/* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_tx_status_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_rx_status_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID */
+struct wmi_cfg_def_rx_offload_done_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_tx_desc_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_rx_desc_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
/* WMI_RCP_ADDBA_REQ_EVENTID */
struct wmi_rcp_addba_req_event {
/* Used for cid less than 8. For higher cid set
@@ -2047,9 +2601,9 @@ struct wmi_data_port_open_event {
u8 reserved[3];
} __packed;
-/* WMI_VRING_EN_EVENTID */
-struct wmi_vring_en_event {
- u8 vring_index;
+/* WMI_RING_EN_EVENTID */
+struct wmi_ring_en_event {
+ u8 ring_index;
u8 reserved[3];
} __packed;
@@ -2174,6 +2728,11 @@ struct wmi_echo_rsp_event {
__le32 echoed_value;
} __packed;
+/* WMI_DEEP_ECHO_RSP_EVENTID */
+struct wmi_deep_echo_rsp_event {
+ __le32 echoed_value;
+} __packed;
+
/* WMI_RF_PWR_ON_DELAY_RSP_EVENTID */
struct wmi_rf_pwr_on_delay_rsp_event {
/* wmi_fw_status */
@@ -2312,6 +2871,81 @@ struct wmi_rs_cfg {
__le32 mcs_en_vec;
} __packed;
+enum wmi_edmg_tx_mode {
+ WMI_TX_MODE_DMG = 0x0,
+ WMI_TX_MODE_EDMG_CB1 = 0x1,
+ WMI_TX_MODE_EDMG_CB2 = 0x2,
+ WMI_TX_MODE_EDMG_CB1_LONG_LDPC = 0x3,
+ WMI_TX_MODE_EDMG_CB2_LONG_LDPC = 0x4,
+ WMI_TX_MODE_MAX,
+};
+
+/* Rate search parameters common configuration */
+struct wmi_rs_cfg_ex_common {
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ /* stop threshold [0-100] */
+ u8 stop_th;
+ /* MCS1 stop threshold [0-100] */
+ u8 mcs1_fail_th;
+ u8 max_back_failure_th;
+ /* Debug feature for disabling internal RS trigger (which is
+ * currently triggered by BF Done)
+ */
+ u8 dbg_disable_internal_trigger;
+ u8 reserved[3];
+ __le32 back_failure_mask;
+} __packed;
+
+/* Rate search parameters configuration per MCS */
+struct wmi_rs_cfg_ex_mcs {
+ /* The maximal allowed PER for each MCS
+ * MCS will be considered as failed if PER during RS is higher
+ */
+ u8 per_threshold;
+ /* Number of MPDUs for each MCS
+ * this is the minimal statistic required to make an educated
+ * decision
+ */
+ u8 min_frame_cnt;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_RS_CFG_EX_CMDID */
+struct wmi_rs_cfg_ex_cmd {
+ /* Configuration for all MCSs */
+ struct wmi_rs_cfg_ex_common common_cfg;
+ u8 each_mcs_cfg_size;
+ u8 reserved[3];
+ /* Configuration for each MCS */
+ struct wmi_rs_cfg_ex_mcs each_mcs_cfg[0];
+} __packed;
+
+/* WMI_RS_CFG_EX_EVENTID */
+struct wmi_rs_cfg_ex_event {
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_RS_ENABLE_CMDID */
+struct wmi_rs_enable_cmd {
+ u8 cid;
+ /* enable or disable rate search */
+ u8 rs_enable;
+ u8 reserved[2];
+ __le32 mcs_en_vec;
+} __packed;
+
+/* WMI_RS_ENABLE_EVENTID */
+struct wmi_rs_enable_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* Slot types */
enum wmi_sched_scheme_slot_type {
WMI_SCHED_SLOT_SP = 0x0,
@@ -2404,7 +3038,7 @@ struct wmi_scheduling_scheme_event {
u8 reserved[1];
} __packed;
-/* WMI_RS_CFG_CMDID */
+/* WMI_RS_CFG_CMDID - deprecated */
struct wmi_rs_cfg_cmd {
/* connection id */
u8 cid;
@@ -2414,7 +3048,7 @@ struct wmi_rs_cfg_cmd {
struct wmi_rs_cfg rs_cfg;
} __packed;
-/* WMI_RS_CFG_DONE_EVENTID */
+/* WMI_RS_CFG_DONE_EVENTID - deprecated */
struct wmi_rs_cfg_done_event {
u8 cid;
/* enum wmi_fw_status */
@@ -2422,7 +3056,7 @@ struct wmi_rs_cfg_done_event {
u8 reserved[2];
} __packed;
-/* WMI_GET_DETAILED_RS_RES_CMDID */
+/* WMI_GET_DETAILED_RS_RES_CMDID - deprecated */
struct wmi_get_detailed_rs_res_cmd {
/* connection id */
u8 cid;
@@ -2447,7 +3081,7 @@ struct wmi_rs_results {
u8 mcs;
} __packed;
-/* WMI_GET_DETAILED_RS_RES_EVENTID */
+/* WMI_GET_DETAILED_RS_RES_EVENTID - deprecated */
struct wmi_get_detailed_rs_res_event {
u8 cid;
/* enum wmi_rs_results_status */
@@ -2457,6 +3091,45 @@ struct wmi_get_detailed_rs_res_event {
u8 reserved[3];
} __packed;
+/* WMI_GET_DETAILED_RS_RES_EX_CMDID */
+struct wmi_get_detailed_rs_res_ex_cmd {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/* Rate search results */
+struct wmi_rs_results_ex_common {
+ /* RS timestamp */
+ __le32 tsf;
+ /* RS selected MCS */
+ u8 mcs;
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ u8 reserved[2];
+} __packed;
+
+/* Rate search results */
+struct wmi_rs_results_ex_mcs {
+ /* number of sent MPDUs */
+ u8 num_of_tx_pkt;
+ /* number of non-acked MPDUs */
+ u8 num_of_non_acked_pkt;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EX_EVENTID */
+struct wmi_get_detailed_rs_res_ex_event {
+ u8 cid;
+ /* enum wmi_rs_results_status */
+ u8 status;
+ u8 reserved0[2];
+ struct wmi_rs_results_ex_common common_rs_results;
+ u8 each_mcs_results_size;
+ u8 reserved1[3];
+ /* Results for each MCS */
+ struct wmi_rs_results_ex_mcs each_mcs_results[0];
+} __packed;
+
/* BRP antenna limit mode */
enum wmi_brp_ant_limit_mode {
/* Disable BRP force antenna limit */
@@ -3207,13 +3880,20 @@ struct wmi_get_assoc_list_res_event {
u8 reserved[3];
} __packed;
-/* WMI_BF_CONTROL_EVENTID */
+/* WMI_BF_CONTROL_EVENTID - deprecated */
struct wmi_bf_control_event {
/* wmi_fw_status */
u8 status;
u8 reserved[3];
} __packed;
+/* WMI_BF_CONTROL_EX_EVENTID */
+struct wmi_bf_control_ex_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* WMI_COMMAND_NOT_SUPPORTED_EVENTID */
struct wmi_command_not_supported_event {
/* device id */
@@ -3283,4 +3963,96 @@ struct wmi_internal_fw_set_channel_event {
u8 reserved[3];
} __packed;
+/* WMI_LINK_STATS_CONFIG_DONE_EVENTID */
+struct wmi_link_stats_config_done_event {
+ /* wmi_fw_status_e */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_STATS_EVENTID */
+struct wmi_link_stats_event {
+ __le64 tsf;
+ __le16 payload_size;
+ u8 has_next;
+ u8 reserved[5];
+ /* a stream of wmi_link_stats_record_s */
+ u8 payload[0];
+} __packed;
+
+/* WMI_LINK_STATS_EVENT */
+struct wmi_link_stats_record {
+ /* wmi_link_stats_record_type_e */
+ u8 record_type_id;
+ u8 reserved;
+ __le16 record_size;
+ u8 record[0];
+} __packed;
+
+/* WMI_LINK_STATS_TYPE_BASIC */
+struct wmi_link_stats_basic {
+ u8 cid;
+ s8 rssi;
+ u8 sqi;
+ u8 bf_mcs;
+ u8 per_average;
+ u8 selected_rfc;
+ u8 rx_effective_ant_num;
+ u8 my_rx_sector;
+ u8 my_tx_sector;
+ u8 other_rx_sector;
+ u8 other_tx_sector;
+ u8 reserved[7];
+ /* 1/4 dB units */
+ __le16 snr;
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le32 bf_count;
+ __le32 rx_bcast_frames;
+} __packed;
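Since the snr field above is little-endian and in quarter-dB units, a consumer divides by four after byte-swapping. A stand-alone illustration (the helper name and sample value are made up):

#include <stdint.h>
#include <stdio.h>

/* 'snr_q' is the quarter-dB value after le16_to_cpu() in the driver */
static int snr_qdb_to_db(uint16_t snr_q)
{
	return snr_q / 4;
}

int main(void)
{
	printf("raw 104 -> %d dB\n", snr_qdb_to_db(104)); /* prints 26 dB */
	return 0;
}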
+
+/* WMI_LINK_STATS_TYPE_GLOBAL */
+struct wmi_link_stats_global {
+ /* all ack-able frames */
+ __le32 rx_frames;
+ /* all ack-able frames */
+ __le32 tx_frames;
+ __le32 rx_ba_frames;
+ __le32 tx_ba_frames;
+ __le32 tx_beacons;
+ __le32 rx_mic_errors;
+ __le32 rx_crc_errors;
+ __le32 tx_fail_no_ack;
+ u8 reserved[8];
+} __packed;
+
+/* WMI_SET_GRANT_MCS_EVENTID */
+struct wmi_set_grant_mcs_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_AP_SLOT_SIZE_EVENTID */
+struct wmi_set_ap_slot_size_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID */
+struct wmi_set_vring_priority_weight_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_EVENTID */
+struct wmi_set_vring_priority_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index b01dc34d55af..74538085cfb7 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1399,6 +1399,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel)
return 0;
}
+#ifdef CONFIG_PROC_FS
static int atmel_proc_show(struct seq_file *m, void *v)
{
struct atmel_private *priv = m->private;
@@ -1481,6 +1482,7 @@ static int atmel_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Current state:\t\t%s\n", s);
return 0;
}
+#endif
static const struct net_device_ops atmel_netdev_ops = {
.ndo_open = atmel_open,
@@ -1516,10 +1518,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->present_callback = card_present;
priv->card = card;
priv->firmware = NULL;
- priv->firmware_id[0] = '\0';
priv->firmware_type = fw_type;
if (firmware) /* module parameter */
- strcpy(priv->firmware_id, firmware);
+ strlcpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
priv->bus_type = card_present ? BUS_TYPE_PCCARD : BUS_TYPE_PCI;
priv->station_state = STATION_STATE_DOWN;
priv->do_rx_crc = 0;
@@ -2646,14 +2647,9 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
}
- if (!(new_firmware = kmalloc(com.len, GFP_KERNEL))) {
- rc = -ENOMEM;
- break;
- }
-
- if (copy_from_user(new_firmware, com.data, com.len)) {
- kfree(new_firmware);
- rc = -EFAULT;
+ new_firmware = memdup_user(com.data, com.len);
+ if (IS_ERR(new_firmware)) {
+ rc = PTR_ERR(new_firmware);
break;
}
@@ -3681,7 +3677,7 @@ static int probe_atmel_card(struct net_device *dev)
atmel_write16(dev, GCR, 0x0060);
atmel_write16(dev, GCR, 0x0040);
- mdelay(500);
+ msleep(500);
if (atmel_read16(dev, MR2) == 0) {
/* No stored firmware so load a small stub which just
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index cb987c2ecc6b..87131f663292 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
led->wl = dev->wl;
led->index = led_index;
led->activelow = activelow;
- strncpy(led->name, name, sizeof(led->name));
+ strlcpy(led->name, name, sizeof(led->name));
atomic_set(&led->state, 0);
led->led_dev.name = led->name;
diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
index fd4565389c77..bc922118b6ac 100644
--- a/drivers/net/wireless/broadcom/b43legacy/leds.c
+++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
@@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
led->dev = dev;
led->index = led_index;
led->activelow = activelow;
- strncpy(led->name, name, sizeof(led->name));
+ strlcpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b6122aad639e..5444e6213d45 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2434,7 +2434,7 @@ static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si)
struct nl80211_sta_flag_update *sfu;
brcmf_dbg(TRACE, "flags %08x\n", fw_sta_flags);
- si->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+ si->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS);
sfu = &si->sta_flags;
sfu->mask = BIT(NL80211_STA_FLAG_WME) |
BIT(NL80211_STA_FLAG_AUTHENTICATED) |
@@ -2470,7 +2470,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
brcmf_err("Failed to get bss info (%d)\n", err);
goto out_kfree;
}
- si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ si->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
si->bss_param.dtim_period = buf->bss_le.dtim_period;
capability = le16_to_cpu(buf->bss_le.capability);
@@ -2501,7 +2501,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
brcmf_err("BRCMF_C_GET_RATE error (%d)\n", err);
return err;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->txrate.legacy = rate * 5;
memset(&scbval, 0, sizeof(scbval));
@@ -2512,7 +2512,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
return err;
}
rssi = le32_to_cpu(scbval.val);
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi;
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_GET_PKTCNTS, &pktcnt,
@@ -2521,10 +2521,10 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
brcmf_err("BRCMF_C_GET_GET_PKTCNTS error (%d)\n", err);
return err;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->rx_packets = le32_to_cpu(pktcnt.rx_good_pkt);
sinfo->rx_dropped_misc = le32_to_cpu(pktcnt.rx_bad_pkt);
sinfo->tx_packets = le32_to_cpu(pktcnt.tx_good_pkt);
@@ -2571,7 +2571,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
}
}
brcmf_dbg(TRACE, "version %d\n", le16_to_cpu(sta_info_le.ver));
- sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
sta_flags = le32_to_cpu(sta_info_le.flags);
brcmf_convert_sta_flags(sta_flags, sinfo);
@@ -2581,33 +2581,33 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
else
sinfo->sta_flags.set &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
if (sta_flags & BRCMF_STA_ASSOC) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME);
sinfo->connected_time = le32_to_cpu(sta_info_le.in);
brcmf_fill_bss_param(ifp, sinfo);
}
if (sta_flags & BRCMF_STA_SCBSTATS) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->tx_failed = le32_to_cpu(sta_info_le.tx_failures);
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = le32_to_cpu(sta_info_le.tx_pkts);
sinfo->tx_packets += le32_to_cpu(sta_info_le.tx_mcast_pkts);
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->rx_packets = le32_to_cpu(sta_info_le.rx_ucast_pkts);
sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts);
if (sinfo->tx_packets) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->txrate.legacy =
le32_to_cpu(sta_info_le.tx_rate) / 100;
}
if (sinfo->rx_packets) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
sinfo->rxrate.legacy =
le32_to_cpu(sta_info_le.rx_rate) / 100;
}
if (le16_to_cpu(sta_info_le.ver) >= 4) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES);
sinfo->tx_bytes = le64_to_cpu(sta_info_le.tx_tot_bytes);
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
}
total_rssi = 0;
@@ -2623,10 +2623,10 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
}
}
if (count_rssi) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
sinfo->chains = count_rssi;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
} else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -2639,7 +2639,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
goto done;
} else {
rssi = le32_to_cpu(scb_val.val);
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
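
The BIT() -> BIT_ULL() conversions above follow from station_info::filled being a u64 bitmap: BIT(n) expands to 1UL << n, which is only 32 bits wide on 32-bit architectures, so any NL80211_STA_INFO_* index of 32 or more would be lost or undefined behaviour. BIT_ULL() always produces a 64-bit constant. A standalone sketch, using the standard kernel definitions of the two macros:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)     (1UL << (nr))	/* unsigned long: 32 bits on 32-bit arches */
#define BIT_ULL(nr) (1ULL << (nr))	/* always 64 bits wide */

int main(void)
{
	uint64_t filled = 0;

	/* On a 32-bit build, BIT(40) would shift past the width of unsigned
	 * long (undefined behaviour); BIT_ULL(40) is always well defined. */
	filled |= BIT_ULL(40);
	printf("filled = %#llx\n", (unsigned long long)filled);
	return 0;
}
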
@@ -6926,15 +6926,15 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
cfg->d11inf.io_type = (u8)io_type;
brcmu_d11_attach(&cfg->d11inf);
- err = brcmf_setup_wiphy(wiphy, ifp);
- if (err < 0)
- goto priv_out;
-
/* regulatory notifier below needs access to cfg so
* assign it now.
*/
drvr->config = cfg;
+ err = brcmf_setup_wiphy(wiphy, ifp);
+ if (err < 0)
+ goto priv_out;
+
brcmf_dbg(INFO, "Registering custom regulatory\n");
wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 72954fd6df3b..b1f702faff4f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -21,6 +21,7 @@
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <net/addrconf.h>
+#include <net/ieee80211_radiotap.h>
#include <net/ipv6.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
@@ -404,6 +405,30 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
netif_rx_ni(skb);
}
+void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FMT_RADIOTAP)) {
+ /* Do nothing */
+ } else {
+ struct ieee80211_radiotap_header *radiotap;
+
+ /* TODO: use RX status to fill some radiotap data */
+ radiotap = skb_push(skb, sizeof(*radiotap));
+ memset(radiotap, 0, sizeof(*radiotap));
+ radiotap->it_len = cpu_to_le16(sizeof(*radiotap));
+
+ /* TODO: 4 bytes with receive status? */
+ skb->len -= 4;
+ }
+
+ skb->dev = ifp->ndev;
+ skb_reset_mac_header(skb);
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+
+ brcmf_netif_rx(ifp, skb);
+}
+
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
struct brcmf_if **ifp)
{
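
When the firmware lacks BRCMF_FEAT_MONITOR_FMT_RADIOTAP, the new brcmf_netif_mon_rx() above prepends an empty radiotap header so capture tools can still parse the frame. A compile-only sketch of that header's layout (mirroring include/net/ieee80211_radiotap.h; the struct name here is illustrative):

#include <stdint.h>

struct radiotap_hdr_sketch {
	uint8_t  it_version;	/* always 0 */
	uint8_t  it_pad;
	uint16_t it_len;	/* little-endian length of the whole header */
	uint32_t it_present;	/* bitmap of present fields; 0 == no metadata */
} __attribute__((packed));

_Static_assert(sizeof(struct radiotap_hdr_sketch) == 8,
	       "minimal radiotap header is 8 bytes");

/* With it_present == 0 and it_len == 8, the header carries no radio
 * metadata, but sniffers can still skip it and reach the 802.11 frame. */
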
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 401f50458686..dcf6e27cc16f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -121,6 +121,7 @@ struct brcmf_pub {
struct brcmf_if *iflist[BRCMF_MAX_IFS];
s32 if2bss[BRCMF_MAX_IFS];
+ struct brcmf_if *mon_if;
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
@@ -216,6 +217,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
int __init brcmf_core_init(void);
void __exit brcmf_core_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 800a423c7bc2..8347da632a5b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -48,6 +48,8 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MBSS, "mbss" },
{ BRCMF_FEAT_MCHAN, "mchan" },
{ BRCMF_FEAT_P2P, "p2p" },
+ { BRCMF_FEAT_MONITOR, "monitor" },
+ { BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
};
#ifdef DEBUG
@@ -91,6 +93,42 @@ static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
}
#endif /* DEBUG */
+struct brcmf_feat_fwfeat {
+ const char * const fwid;
+ u32 feat_flags;
+};
+
+static const struct brcmf_feat_fwfeat brcmf_feat_fwfeat_map[] = {
+ /* brcmfmac43602-pcie.ap.bin from linux-firmware.git commit ea1178515b88 */
+ { "01-6cb8e269", BIT(BRCMF_FEAT_MONITOR) },
+ /* brcmfmac4366b-pcie.bin from linux-firmware.git commit 52442afee990 */
+ { "01-c47a91a4", BIT(BRCMF_FEAT_MONITOR) },
+};
+
+static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv)
+{
+ const struct brcmf_feat_fwfeat *e;
+ u32 feat_flags = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_feat_fwfeat_map); i++) {
+ e = &brcmf_feat_fwfeat_map[i];
+ if (!strcmp(e->fwid, drv->fwver)) {
+ feat_flags = e->feat_flags;
+ break;
+ }
+ }
+
+ if (!feat_flags)
+ return;
+
+ for (i = 0; i < BRCMF_FEAT_LAST; i++)
+ if (feat_flags & BIT(i))
+ brcmf_dbg(INFO, "enabling firmware feature: %s\n",
+ brcmf_feat_names[i]);
+ drv->feat_flags |= feat_flags;
+}
+
/**
* brcmf_feat_iovar_int_get() - determine feature through iovar query.
*
@@ -251,6 +289,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
}
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
+ brcmf_feat_firmware_overrides(drvr);
+
/* set chip related quirks */
switch (drvr->bus_if->chip) {
case BRCM_CC_43236_CHIP_ID:
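
brcmf_feat_firmware_overrides() above force-enables features for specific firmware builds by matching drvr->fwver against a small table. A standalone sketch of that lookup (the "01-6cb8e269" id comes from the table above; BIT() and the feature index are illustrative):

#include <stdio.h>
#include <string.h>

#define BIT(nr) (1UL << (nr))
enum { FEAT_MONITOR = 2 };	/* illustrative bit position */

struct fwfeat { const char *fwid; unsigned long feat_flags; };

static const struct fwfeat overrides[] = {
	{ "01-6cb8e269", BIT(FEAT_MONITOR) },
	{ "01-c47a91a4", BIT(FEAT_MONITOR) },
};

int main(void)
{
	const char *fwver = "01-6cb8e269";	/* stand-in for drvr->fwver */
	unsigned long flags = 0;

	for (size_t i = 0; i < sizeof(overrides) / sizeof(overrides[0]); i++) {
		if (!strcmp(overrides[i].fwid, fwver)) {
			flags = overrides[i].feat_flags;
			break;
		}
	}
	printf("override flags for %s: %#lx\n", fwver, flags);
	return 0;
}
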
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index d1193825e559..0b4974df353a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -33,6 +33,8 @@
* MFP: 802.11w Management Frame Protection.
* GSCAN: enhanced scan offload feature.
* FWSUP: Firmware supplicant.
+ * MONITOR: firmware can pass monitor packets to host.
+ * MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header.
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -48,7 +50,9 @@
BRCMF_FEAT_DEF(WOWL_ARP_ND) \
BRCMF_FEAT_DEF(MFP) \
BRCMF_FEAT_DEF(GSCAN) \
- BRCMF_FEAT_DEF(FWSUP)
+ BRCMF_FEAT_DEF(FWSUP) \
+ BRCMF_FEAT_DEF(MONITOR) \
+ BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 4b290705e3e6..d5bb81e88762 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -32,11 +32,30 @@
#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002
-#define BRCMF_STA_WME 0x00000002 /* WMM association */
-#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */
-#define BRCMF_STA_ASSOC 0x00000010 /* Associated */
-#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */
-#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */
+#define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */
+#define BRCMF_STA_WME 0x00000002 /* WMM association */
+#define BRCMF_STA_NONERP 0x00000004 /* No ERP */
+#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */
+#define BRCMF_STA_ASSOC 0x00000010 /* Associated */
+#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */
+#define BRCMF_STA_WDS 0x00000040 /* Wireless Distribution System */
+#define BRCMF_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */
+#define BRCMF_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */
+#define BRCMF_STA_APSD_BE 0x00000200 /* APSD delv/trigger for AC_BE is default enabled */
+#define BRCMF_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */
+#define BRCMF_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */
+#define BRCMF_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */
+#define BRCMF_STA_N_CAP 0x00002000 /* STA 802.11n capable */
+#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */
+#define BRCMF_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */
+#define BRCMF_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */
+#define BRCMF_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */
+#define BRCMF_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */
+#define BRCMF_STA_RIFS_CAP 0x00080000 /* rifs enabled */
+#define BRCMF_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */
+#define BRCMF_STA_WPS 0x00200000 /* WPS state */
+#define BRCMF_STA_DWDS_CAP 0x01000000 /* DWDS CAP */
+#define BRCMF_STA_DWDS 0x02000000 /* DWDS active */
/* size of brcmf_scan_params not including variable length array */
#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -155,6 +174,8 @@
#define BRCMF_MFP_CAPABLE 1
#define BRCMF_MFP_REQUIRED 2
+#define BRCMF_VHT_CAP_MCS_MAP_NSS_MAX 8
+
/* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each
* ioctl. It is relatively small because firmware has small maximum size input
* payload restriction for ioctls.
@@ -531,6 +552,8 @@ struct brcmf_sta_info_le {
/* w/hi bit set if basic */
__le32 in; /* seconds elapsed since associated */
__le32 listen_interval_inms; /* Min Listen interval in ms for STA */
+
+ /* Fields valid for ver >= 3 */
__le32 tx_pkts; /* # of packets transmitted */
__le32 tx_failures; /* # of packets failed */
__le32 rx_ucast_pkts; /* # of unicast packets received */
@@ -539,6 +562,8 @@ struct brcmf_sta_info_le {
__le32 rx_rate; /* Rate of last successful rx frame */
__le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
__le32 rx_decrypt_failures; /* # of packet decrypted failed */
+
+ /* Fields valid for ver >= 4 */
__le32 tx_tot_pkts; /* # of tx pkts (ucast + mcast) */
__le32 rx_tot_pkts; /* # of data packets recvd (uni + mcast) */
__le32 tx_mcast_pkts; /* # of mcast pkts txed */
@@ -575,6 +600,14 @@ struct brcmf_sta_info_le {
*/
__le32 rx_pkts_retried; /* # rx with retry bit set */
__le32 tx_rate_fallback; /* lowest fallback TX rate */
+
+ /* Fields valid for ver >= 5 */
+ struct {
+ __le32 count; /* # rates in this set */
+ u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+ u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */
+ __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
+ } rateset_adv;
};
struct brcmf_chanspec_list {
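
The new "/* Fields valid for ver >= N */" markers in brcmf_sta_info_le document why brcmf_cfg80211_get_station() only reads tx_tot_bytes/rx_tot_bytes after checking le16_to_cpu(sta_info_le.ver) >= 4. A standalone sketch of that version gating (field names mirror the struct; values are made up):

#include <stdint.h>
#include <stdio.h>

struct sta_info_sketch {
	uint16_t ver;		/* struct version reported by firmware */
	uint64_t tx_tot_bytes;	/* only meaningful when ver >= 4 */
	uint64_t rx_tot_bytes;	/* only meaningful when ver >= 4 */
};

int main(void)
{
	struct sta_info_sketch si = { .ver = 3 };

	if (si.ver >= 4)
		printf("tx=%llu rx=%llu\n",
		       (unsigned long long)si.tx_tot_bytes,
		       (unsigned long long)si.rx_tot_bytes);
	else
		printf("ver %u firmware does not report byte counters\n", si.ver);
	return 0;
}
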
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index c40ba8855cd5..4e8397a0cbc8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -69,6 +69,8 @@
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01
+#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11 0x02
+#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK 0x07
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5
#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
@@ -1128,6 +1130,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
struct sk_buff *skb;
u16 data_offset;
u16 buflen;
+ u16 flags;
u32 idx;
struct brcmf_if *ifp;
@@ -1137,6 +1140,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
data_offset = le16_to_cpu(rx_complete->data_offset);
buflen = le16_to_cpu(rx_complete->data_len);
idx = le32_to_cpu(rx_complete->msg.request_id);
+ flags = le16_to_cpu(rx_complete->flags);
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
msgbuf->rx_pktids, idx);
@@ -1150,6 +1154,20 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
skb_trim(skb, buflen);
+ if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
+ BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
+ ifp = msgbuf->drvr->mon_if;
+
+ if (!ifp) {
+ brcmf_err("Received unexpected monitor pkt\n");
+ brcmu_pkt_buf_free_skb(skb);
+ return;
+ }
+
+ brcmf_netif_mon_rx(ifp, skb);
+ return;
+ }
+
ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
if (!ifp || !ifp->ndev) {
brcmf_err("Received pkt for invalid ifidx %d\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index 3a13d176b221..35e3b101e5cf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -159,7 +159,7 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
{
u16 data;
- if ((addr == RADIO_IDCODE))
+ if (addr == RADIO_IDCODE)
return 0xffff;
switch (pi->pubpi.phy_type) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 1a187557982e..bedec1606caa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -16904,7 +16904,7 @@ static void wlc_phy_workarounds_nphy_rev3(struct brcms_phy *pi)
}
}
-void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi)
+static void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi)
{
static const u8 rfseq_rx2tx_events[] = {
NPHY_RFSEQ_CMD_NOP,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
index b9672da24a9d..b24bc57ca91b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
@@ -213,7 +213,7 @@ static const s16 log_table[] = {
30498,
31267,
32024,
- 32768
+ 32767
};
#define LOG_TABLE_SIZE 32 /* log_table size */
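
The last log_table entry changes from 32768 to 32767 because the table is declared as s16: the signed 16-bit range tops out at 32767, so the old initializer did not fit and was converted in an implementation-defined way (typically wrapping to -32768). A quick standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int16_t wrapped = (int16_t)32768;	/* implementation-defined; -32768 on common ABIs */

	printf("INT16_MAX = %d, (int16_t)32768 = %d\n", INT16_MAX, wrapped);
	return 0;
}
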
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 72046e182745..04dd7a936593 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3419,7 +3419,7 @@ done:
static void airo_handle_tx(struct airo_info *ai, u16 status)
{
- int i, len = 0, index = -1;
+ int i, index = -1;
u16 fid;
if (test_bit(FLAG_MPI, &ai->flags)) {
@@ -3443,11 +3443,9 @@ static void airo_handle_tx(struct airo_info *ai, u16 status)
fid = IN4500(ai, TXCOMPLFID);
- for(i = 0; i < MAX_FIDS; i++) {
- if ((ai->fids[i] & 0xffff) == fid) {
- len = ai->fids[i] >> 16;
+ for (i = 0; i < MAX_FIDS; i++) {
+ if ((ai->fids[i] & 0xffff) == fid)
index = i;
- }
}
if (index != -1) {
diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c
index d9ed22b4cc6b..3718f958c0fc 100644
--- a/drivers/net/wireless/cisco/airo_cs.c
+++ b/drivers/net/wireless/cisco/airo_cs.c
@@ -102,11 +102,8 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int airo_config(struct pcmcia_device *link)
{
- struct local_info *dev;
int ret;
- dev = link->priv;
-
dev_dbg(&link->dev, "airo_config\n");
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index b8fd3cc90634..910db46db6a1 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -692,7 +692,7 @@ static void printk_buf(int level, const u8 * data, u32 len)
static void schedule_reset(struct ipw2100_priv *priv)
{
- unsigned long now = get_seconds();
+ time64_t now = ktime_get_boottime_seconds();
/* If we haven't received a reset request within the backoff period,
* then we can reset the backoff interval so this reset occurs
@@ -701,10 +701,10 @@ static void schedule_reset(struct ipw2100_priv *priv)
(now - priv->last_reset > priv->reset_backoff))
priv->reset_backoff = 0;
- priv->last_reset = get_seconds();
+ priv->last_reset = now;
if (!(priv->status & STATUS_RESET_PENDING)) {
- IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n",
+ IPW_DEBUG_INFO("%s: Scheduling firmware restart (%llds).\n",
priv->net_dev->name, priv->reset_backoff);
netif_carrier_off(priv->net_dev);
netif_stop_queue(priv->net_dev);
@@ -2079,7 +2079,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
memcpy(priv->bssid, bssid, ETH_ALEN);
priv->status |= STATUS_ASSOCIATING;
- priv->connect_start = get_seconds();
+ priv->connect_start = ktime_get_boottime_seconds();
schedule_delayed_work(&priv->wx_event_work, HZ / 10);
}
@@ -4070,8 +4070,8 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr,
#define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" y "\n", priv-> x)
if (priv->status & STATUS_ASSOCIATED)
- len += sprintf(buf + len, "connected: %lu\n",
- get_seconds() - priv->connect_start);
+ len += sprintf(buf + len, "connected: %llu\n",
+ ktime_get_boottime_seconds() - priv->connect_start);
else
len += sprintf(buf + len, "not connected\n");
@@ -4108,7 +4108,7 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr,
DUMP_VAR(txq_stat.lo, "d");
DUMP_VAR(ieee->scans, "d");
- DUMP_VAR(reset_backoff, "d");
+ DUMP_VAR(reset_backoff, "lld");
return len;
}
@@ -5112,11 +5112,9 @@ static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
.host_command_length = ETH_ALEN
};
int err;
- int len;
IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
- len = ETH_ALEN;
/* The Firmware currently ignores the BSSID and just disassociates from
* the currently associated AP -- but in the off chance that a future
* firmware does use the BSSID provided here, we go ahead and try and
@@ -6437,7 +6435,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
pci_disable_device(pci_dev);
pci_set_power_state(pci_dev, PCI_D3hot);
- priv->suspend_at = get_seconds();
+ priv->suspend_at = ktime_get_boottime_seconds();
mutex_unlock(&priv->action_mutex);
@@ -6482,7 +6480,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
* the queue of needed */
netif_device_attach(dev);
- priv->suspend_time = get_seconds() - priv->suspend_at;
+ priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
/* Bring the device back up */
if (!(priv->status & STATUS_RF_KILL_SW))
@@ -7723,7 +7721,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
struct libipw_device *ieee = priv->ieee;
struct lib80211_crypt_data *crypt;
struct iw_param *param = &wrqu->param;
- int ret = 0;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
@@ -7733,7 +7730,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
/*
* wpa_supplicant will control these internally
*/
- ret = -EOPNOTSUPP;
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
@@ -7801,9 +7797,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
{
struct ipw2100_priv *priv = libipw_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- __le16 reason;
-
- reason = cpu_to_le16(mlme->reason_code);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
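
The ipw2100 changes above replace get_seconds() (wall-clock seconds in an unsigned long, 32 bits on many architectures and subject to settimeofday jumps) with ktime_get_boottime_seconds(), a 64-bit monotonic clock that keeps counting across suspend, which is exactly what the suspend_at/suspend_time bookkeeping measures. A userspace analogue using the same Linux-specific clock:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec boot, real;

	clock_gettime(CLOCK_BOOTTIME, &boot);	/* monotonic, includes suspend */
	clock_gettime(CLOCK_REALTIME, &real);	/* wall clock, can jump */
	printf("boottime: %lld s, realtime: %lld s\n",
	       (long long)boot.tv_sec, (long long)real.tv_sec);
	return 0;
}
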
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
index ce3e35f6b60f..8c11c7fa2eef 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
@@ -491,7 +491,7 @@ struct ipw2100_priv {
/* Statistics */
int resets;
- int reset_backoff;
+ time64_t reset_backoff;
/* Context */
u8 essid[IW_ESSID_MAX_SIZE];
@@ -500,8 +500,8 @@ struct ipw2100_priv {
u8 channel;
int last_mode;
- unsigned long connect_start;
- unsigned long last_reset;
+ time64_t connect_start;
+ time64_t last_reset;
u32 channel_mask;
u32 fatal_error;
@@ -581,9 +581,9 @@ struct ipw2100_priv {
int user_requested_scan;
- /* Track time in suspend */
- unsigned long suspend_at;
- unsigned long suspend_time;
+ /* Track time in suspend, using CLOCK_BOOTTIME */
+ time64_t suspend_at;
+ time64_t suspend_time;
u32 interrupts;
int tx_interrupts;
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 8a858f7e36f4..9644e7b93645 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -7112,7 +7112,7 @@ static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
{
u32 ret = 0;
- if ((priv == NULL))
+ if (!priv)
return 0;
if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
@@ -11888,7 +11888,7 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
- priv->suspend_at = get_seconds();
+ priv->suspend_at = ktime_get_boottime_seconds();
return 0;
}
@@ -11925,7 +11925,7 @@ static int ipw_pci_resume(struct pci_dev *pdev)
* the queue of needed */
netif_device_attach(dev);
- priv->suspend_time = get_seconds() - priv->suspend_at;
+ priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
/* Bring the device back up */
schedule_work(&priv->up);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index aa301d1eee3c..f98ab1f71edd 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -1343,9 +1343,9 @@ struct ipw_priv {
s8 tx_power;
- /* Track time in suspend */
- unsigned long suspend_at;
- unsigned long suspend_time;
+ /* Track time in suspend using CLOCK_BOOTTIME */
+ time64_t suspend_at;
+ time64_t suspend_time;
#ifdef CONFIG_PM
u32 pm_state[16];
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index dd29f46d086b..d32d39fa2686 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -479,7 +479,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
{
struct iw_point *erq = &(wrqu->encoding);
int len, key;
- struct lib80211_crypt_data *crypt;
struct libipw_security *sec = &ieee->sec;
LIBIPW_DEBUG_WX("GET_ENCODE\n");
@@ -492,7 +491,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
} else
key = ieee->crypt_info.tx_keyidx;
- crypt = ieee->crypt_info.crypt[key];
erq->flags = key + 1;
if (!sec->enabled) {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-debug.c b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
index c1b4441fb8b2..a2960032be81 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
@@ -95,7 +95,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_Rx - OFDM:");
pos +=
scnprintf(buf + pos, bufsz - pos,
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 62a9794f952b..57e3b6cca234 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -476,8 +476,6 @@ il3945_tx_skb(struct il_priv *il,
int txq_id = skb_get_queue_mapping(skb);
u16 len, idx, hdr_len;
u16 firstlen, secondlen;
- u8 id;
- u8 unicast;
u8 sta_id;
u8 tid = 0;
__le16 fc;
@@ -496,9 +494,6 @@ il3945_tx_skb(struct il_priv *il,
goto drop_unlock;
}
- unicast = !is_multicast_ether_addr(hdr->addr1);
- id = 0;
-
fc = hdr->frame_control;
#ifdef CONFIG_IWLEGACY_DEBUG
@@ -957,10 +952,8 @@ il3945_rx_queue_restock(struct il_priv *il)
struct list_head *element;
struct il_rx_buf *rxb;
unsigned long flags;
- int write;
spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
/* Get next free Rx buffer, remove from free list */
element = rxq->rx_free.next;
@@ -2725,7 +2718,6 @@ void
il3945_post_associate(struct il_priv *il)
{
int rc = 0;
- struct ieee80211_conf *conf = NULL;
if (!il->vif || !il->is_open)
return;
@@ -2738,8 +2730,6 @@ il3945_post_associate(struct il_priv *il)
il_scan_cancel_timeout(il, 200);
- conf = &il->hw->conf;
-
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
il3945_commit_rxon(il);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index dbf164d48ed3..3e568ce2fb20 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -1634,7 +1634,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
{
struct il_channel_info *ch_info;
s8 max_power;
- u8 a_band;
u8 i;
if (il->tx_power_user_lmt == power) {
@@ -1650,7 +1649,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
for (i = 0; i < il->channel_count; i++) {
ch_info = &il->channel_info[i];
- a_band = il_is_channel_a_band(ch_info);
/* find minimum power of all user and regulatory constraints
* (does not consider h/w clipping limitations) */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 562e94870a9c..280cd8ae1696 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1338,15 +1338,12 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
u32 *accum_stats;
u32 *delta, *max_delta;
struct stats_general_common *general, *accum_general;
- struct stats_tx *tx, *accum_tx;
prev_stats = (__le32 *) &il->_4965.stats;
accum_stats = (u32 *) &il->_4965.accum_stats;
size = sizeof(struct il_notif_stats);
general = &il->_4965.stats.general.common;
accum_general = &il->_4965.accum_stats.general.common;
- tx = &il->_4965.stats.tx;
- accum_tx = &il->_4965.accum_stats.tx;
delta = (u32 *) &il->_4965.delta_stats;
max_delta = (u32 *) &il->_4965.max_delta;
@@ -4784,7 +4781,6 @@ static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
struct il_priv *il = context;
- struct il_ucode_header *ucode;
int err;
struct il4965_firmware_pieces pieces;
const unsigned int api_max = il->cfg->ucode_api_max;
@@ -4814,8 +4810,6 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
}
/* Data from ucode file: header followed by uCode images */
- ucode = (struct il_ucode_header *)ucode_raw->data;
-
err = il4965_load_firmware(il, ucode_raw, &pieces);
if (err)
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 4d08d78c6b71..04e376cc898c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -7,13 +7,13 @@ iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index a63ca8820568..fedb108db68f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -63,6 +63,7 @@
static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index d4ba66aecdc9..91ca77c7571c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -59,7 +59,7 @@
#define IWL_22000_UCODE_API_MAX 38
/* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN 24
+#define IWL_22000_UCODE_API_MIN 39
/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d
@@ -73,29 +73,48 @@
#define IWL_22000_SMEM_OFFSET 0x400000
#define IWL_22000_SMEM_LEN 0xD0000
-#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
-#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_MODULE_FIRMWARE(api) \
IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
+ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_22000 10
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
.num_of_queues = 512,
+ .max_tfd_queue_size = 256,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_base_params iwl_22560_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+ .num_of_queues = 512,
+ .max_tfd_queue_size = 65536,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -110,11 +129,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
-#define IWL_DEVICE_22000 \
+#define IWL_DEVICE_22000_COMMON \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_22000, \
- .base_params = &iwl_22000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
.non_shared_ant = ANT_A, \
@@ -129,6 +146,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
+ .ht_params = &iwl_22000_ht_params, \
+ .nvm_ver = IWL_22000_NVM_VERSION, \
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.use_tfh = true, \
.rf_id = true, \
.gen2 = true, \
@@ -136,86 +157,114 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dbgc_supported = true, \
.min_umac_error_event_table = 0x400000
+#define IWL_DEVICE_22500 \
+ IWL_DEVICE_22000_COMMON, \
+ .device_family = IWL_DEVICE_FAMILY_22000, \
+ .base_params = &iwl_22000_base_params, \
+ .csr = &iwl_csr_v1
+
+#define IWL_DEVICE_22560 \
+ IWL_DEVICE_22000_COMMON, \
+ .device_family = IWL_DEVICE_FAMILY_22560, \
+ .base_params = &iwl_22560_base_params, \
+ .csr = &iwl_csr_v2
+
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
.cdb = true,
};
const struct iwl_cfg iwl22000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_JF_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_F0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
+ .name = "Intel(R) Dual Band Wireless AX 22560",
+ .fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
+ IWL_DEVICE_22560,
+ .cdb = true,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
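
The 22000.c rework above splits the old IWL_DEVICE_22000 macro into a shared IWL_DEVICE_22000_COMMON block plus IWL_DEVICE_22500/IWL_DEVICE_22560 variants, so the 22560 family can pick different base params (65536-entry TFD queues) and a different CSR layout without duplicating the common fields. A standalone sketch of that macro-composition pattern (struct and field names are illustrative, not the iwlwifi ones):

#include <stdio.h>

struct cfg_sketch {
	const char *name;
	unsigned int max_tfd_queue_size;
	int csr_version;
	int led_mode;
};

#define DEVICE_COMMON	.led_mode = 1
#define DEVICE_22500	DEVICE_COMMON, .max_tfd_queue_size = 256,   .csr_version = 1
#define DEVICE_22560	DEVICE_COMMON, .max_tfd_queue_size = 65536, .csr_version = 2

static const struct cfg_sketch cfg_22560_example = {
	.name = "22560 example",
	DEVICE_22560,
};

int main(void)
{
	printf("%s: tfd=%u, csr=v%d, led=%d\n", cfg_22560_example.name,
	       cfg_22560_example.max_tfd_queue_size,
	       cfg_22560_example.csr_version, cfg_22560_example.led_mode);
	return 0;
}
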
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index a224f1be1ec2..36151e61a26f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -53,6 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.pll_cfg = true,
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index dbcec7ce7863..b5d8274761d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -72,6 +72,7 @@
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 69bfa827e82a..a62c8346f13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -123,6 +123,7 @@
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 7262e973e0d6..c46fa712985b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -104,6 +104,7 @@
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index c8ea63d02619..24b2f7cbb308 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -95,6 +95,7 @@
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 007bfe7656a4..08d3d8a190f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -187,20 +189,4 @@ struct iwl_card_state_notif {
__le32 flags;
} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
-/**
- * struct iwl_fseq_ver_mismatch_nty - Notification about version
- *
- * This notification does not have a direct impact on the init flow.
- * It means that another core (not WiFi) has initiated the FSEQ flow
- * and updated the FSEQ version. The driver only prints an error when
- * this occurs.
- *
- * @aux_read_fseq_ver: auxiliary read FSEQ version
- * @wifi_fseq_ver: FSEQ version (embedded in WiFi)
- */
-struct iwl_fseq_ver_mismatch_ntf {
- __le32 aux_read_fseq_ver;
- __le32 wifi_fseq_ver;
-} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */
-
#endif /* __iwl_fw_api_alive_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index f285bacc8726..6dad748e5cdc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -193,7 +193,8 @@ enum iwl_legacy_cmds {
FW_GET_ITEM_CMD = 0x1a,
/**
- * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2,
+ * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
+ * &struct iwl_tx_cmd_gen3,
* response in &struct iwl_mvm_tx_resp or
* &struct iwl_mvm_tx_resp_v3
*/
@@ -646,13 +647,6 @@ enum iwl_system_subcmd_ids {
* @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
*/
INIT_EXTENDED_CFG_CMD = 0x03,
-
- /**
- * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version
- * mismatch during init. The format is specified in
- * &struct iwl_fseq_ver_mismatch_ntf.
- */
- FSEQ_VER_MISMATCH_NTF = 0xFF,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 5f6e855006dd..59b3c6e8f37b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +85,16 @@ enum iwl_data_path_subcmd_ids {
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
/**
+ * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
+ */
+ STA_HE_CTXT_CMD = 0x7,
+
+ /**
+ * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
+ */
+ RFH_QUEUE_CONFIG_CMD = 0xD,
+
+ /**
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
*/
TLC_MNG_CONFIG_CMD = 0xF,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index f2e31e040a7b..55594c93b014 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -279,6 +281,10 @@ enum iwl_mac_filter_flags {
MAC_FILTER_OUT_BCAST = BIT(8),
MAC_FILTER_IN_CRC32 = BIT(11),
MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
+ /**
+ * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax
+ */
+ MAC_FILTER_IN_11AX = BIT(14),
};
/**
@@ -406,4 +412,170 @@ struct iwl_missed_beacons_notif {
__le32 num_recvd_beacons;
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
+/**
+ * struct iwl_he_backoff_conf - used for backoff configuration
+ * Per each trigger-based AC, (set by MU EDCA Parameter set info-element)
+ * used for backoff configuration of TXF5..TXF8 trigger based.
+ * The MU-TIMER is reloaded w/ MU_TIME each time a frame from the AC is sent via
+ * trigger-based TX.
+ * @cwmin: CW min
+ * @cwmax: CW max
+ * @aifsn: AIFSN
+ * AIFSN=0 means that no backoff from the specified TRIG-BASED AC is
+ * allowed till the MU-TIMER is 0
+ * @mu_time: MU time in 8TU units
+ */
+struct iwl_he_backoff_conf {
+ __le16 cwmin;
+ __le16 cwmax;
+ __le16 aifsn;
+ __le16 mu_time;
+} __packed; /* AC_QOS_DOT11AX_API_S */
+
+#define MAX_HE_SUPP_NSS 2
+#define MAX_HE_CHANNEL_BW_INDX 4
+
+/**
+ * struct iwl_he_pkt_ext - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
+ * i.e. QAM_th1 < QAM_th2 such that if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ * QAM_th2 <= QAM_tx --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ * For each Nss/Bw define 2 QAM thresholds (0..5)
+ * For rates below the low_th, no need for PPE
+ * For rates between low_th and high_th, need 8us PPE
+ * For rates equal to or higher than the high_th, need 16us PPE
+ * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
+ * (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S */
+
+/**
+ * enum iwl_he_sta_ctxt_flags - HE STA context flags
+ * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific
+ * control frames such as TRIG, NDPA, BACK)
+ * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS
+ * color for RX filter but use MAC header
+ * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation
+ * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap
+ * of 32-bits
+ * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid
+ * and should be used
+ * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation
+ * is enabled according to UORA element existence
+ * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing
+ * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK-
+ * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
+ * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
+ * parameter set, i.e. the backoff counters for trig-based ACs
+ */
+enum iwl_he_sta_ctxt_flags {
+ STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
+ STA_CTXT_HE_BSS_COLOR_DIS = BIT(5),
+ STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6),
+ STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7),
+ STA_CTXT_HE_PACKET_EXT = BIT(8),
+ STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9),
+ STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10),
+ STA_CTXT_HE_ACK_ENABLED = BIT(11),
+ STA_CTXT_HE_MU_EDCA_CW = BIT(12),
+};
+
+/**
+ * enum iwl_he_htc_flags - HE HTC support flags
+ * @IWL_HE_HTC_SUPPORT: HE-HTC support
+ * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule
+ * support via A-control field
+ * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field
+ * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field
+ * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field
+ */
+enum iwl_he_htc_flags {
+ IWL_HE_HTC_SUPPORT = BIT(0),
+ IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3),
+ IWL_HE_HTC_BSR_SUPP = BIT(4),
+ IWL_HE_HTC_OMI_SUPP = BIT(5),
+ IWL_HE_HTC_BQR_SUPP = BIT(6),
+};
+
+/*
+ * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in
+ * response to HE MRQ and if the STA provides unsolicited HE MFB
+ */
+#define IWL_HE_HTC_LINK_ADAP_POS (1)
+#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0)
+#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS)
+#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
+
+/**
+ * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (as a power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ */
+struct iwl_he_sta_context_cmd {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 reserved3;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S */
+
#endif /* __iwl_fw_api_mac_h__ */
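
The iwl_he_pkt_ext comment above defines the packet-extension rule: per Nss x bandwidth, two QAM thresholds split the transmit constellation into 0 us, 8 us and 16 us of required PPE. A standalone sketch of that selection (constellation indices 0=BPSK .. 5=1024QAM as in the comment; the example thresholds are made up):

#include <stdio.h>

/* qam_tx <  th_low            -> 0 us PPE
 * th_low <= qam_tx < th_high  -> 8 us PPE
 * th_high <= qam_tx           -> 16 us PPE */
static unsigned int ppe_us(unsigned int qam_tx, unsigned int th_low, unsigned int th_high)
{
	if (qam_tx < th_low)
		return 0;
	if (qam_tx < th_high)
		return 8;
	return 16;
}

int main(void)
{
	/* e.g. thresholds 16QAM(2) / 256QAM(4), transmitting 64QAM(3) */
	printf("PPE = %u us\n", ppe_us(3, 2, 4));
	return 0;
}
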
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 8d6dc9189985..6c5338364794 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -195,7 +195,6 @@ struct iwl_nvm_get_info_general {
* @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
* @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
* @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
- * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
* @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
* @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
* @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
@@ -206,6 +205,9 @@ enum iwl_nvm_mac_sku_flags {
NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
+ /**
+ * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+ */
NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 21e13a315421..087fae91baef 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -314,8 +314,11 @@ enum {
IWL_RATE_MCS_8_INDEX,
IWL_RATE_MCS_9_INDEX,
IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
+ IWL_RATE_MCS_10_INDEX,
+ IWL_RATE_MCS_11_INDEX,
+ IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
- IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
+ IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
};
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -440,8 +443,8 @@ enum {
#define RATE_LEGACY_RATE_MSK 0xff
/* Bit 10 - OFDM HE */
-#define RATE_MCS_OFDM_HE_POS 10
-#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS)
+#define RATE_MCS_HE_POS 10
+#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS)
/*
* Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
@@ -482,15 +485,33 @@ enum {
#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
/*
- * Bit 20-21: HE guard interval and LTF type.
- * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us,
- * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us
+ * Bit 20-21: HE LTF type and guard interval
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 & SGI (bit 13) clear 4xLTF+3.2us
+ * 3 & SGI (bit 13) set 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ * 3 (does not occur)
*/
#define RATE_MCS_HE_GI_LTF_POS 20
#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
#define RATE_MCS_HE_TYPE_POS 22
+#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
@@ -501,6 +522,9 @@ enum {
#define RATE_MCS_LDPC_POS 27
#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
+/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define RATE_MCS_HE_106T_POS 28
+#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
/* Link Quality definitions */
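For illustration only, a minimal sketch of how the masks added above might be consumed on the receive path; the helper name and its caller are hypothetical, and only the RATE_MCS_* macros come from this header.

/* Hypothetical helper (sketch): tell whether rate_n_flags describes an
 * HE PPDU and, if so, pull out the HE type and GI/LTF code. Compare the
 * returned type against RATE_MCS_HE_TYPE_SU/EXT_SU/MU/TRIG; the meaning
 * of the 2-bit GI/LTF code depends on that type, as documented above.
 */
static bool iwl_rate_is_he(u32 rate_n_flags, u32 *he_type, u32 *gi_ltf)
{
	if (!(rate_n_flags & RATE_MCS_HE_MSK))
		return false;

	*he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
	*gi_ltf = (rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
		  RATE_MCS_HE_GI_LTF_POS;
	return true;
}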
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 7e570c4a9df0..2f599353c885 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -343,6 +345,169 @@ enum iwl_rx_mpdu_mac_info {
IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
};
+/*
+ * enum iwl_rx_he_phy - HE PHY data
+ */
+enum iwl_rx_he_phy {
+ IWL_RX_HE_PHY_BEAM_CHNG = BIT(0),
+ IWL_RX_HE_PHY_UPLINK = BIT(1),
+ IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc,
+ IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00,
+ IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12),
+ IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000,
+ IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20),
+ IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000,
+ IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23),
+ IWL_RX_HE_PHY_DOPPLER = BIT(24),
+ /* 6 bits reserved */
+ IWL_RX_HE_PHY_DELIM_EOF = BIT(31),
+
+ /* second dword - MU data */
+ IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0),
+ IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
+ IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL,
+ IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8),
+ /* trigger encoded */
+ IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
+ IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL,
+ /* 1 bit reserved */
+ IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21),
+ IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL,
+ /* 8 bits reserved */
+};
+
+/**
+ * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor (version 1 layout)
+ */
+struct iwl_rx_mpdu_desc_v1 {
+ /* DW7 - carries rss_hash only when rpa_en == 1 */
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+ /* DW8 - carries filter_match only when rpa_en == 1 */
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+ /* DW9 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW10 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW11 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW12 & DW13 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+ /**
+ * @he_phy_data:
+ * HE PHY data, see &enum iwl_rx_he_phy, valid
+ * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+ */
+ __le64 he_phy_data;
+ };
+} __packed;
+
+/**
+ * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor (version 3 layout)
+ */
+struct iwl_rx_mpdu_desc_v3 {
+ /* DW7 - carries filter_match only when rpa_en == 1 */
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+ /* DW8 - carries rss_hash only when rpa_en == 1 */
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+ /* DW9 */
+ /**
+ * @partial_hash: 31:0 ip/tcp header hash
+ * w/o some fields (such as IP SRC addr)
+ */
+ __le32 partial_hash;
+ /* DW10 */
+ /**
+ * @raw_xsum: raw xsum value
+ */
+ __le32 raw_xsum;
+ /* DW11 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW12 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW13 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW14 & DW15 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+ /**
+ * @he_phy_data:
+ * HE PHY data, see &enum iwl_rx_he_phy, valid
+ * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+ */
+ __le64 he_phy_data;
+ };
+ /* DW16 & DW17 */
+ /**
+ * @reserved: reserved
+ */
+ __le32 reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
/**
* struct iwl_rx_mpdu_desc - RX MPDU descriptor
*/
@@ -400,51 +565,14 @@ struct iwl_rx_mpdu_desc {
* @reorder_data: &enum iwl_rx_mpdu_reorder_data
*/
__le32 reorder_data;
- /* DW7 - carries rss_hash only when rpa_en == 1 */
- /**
- * @rss_hash: RSS hash value
- */
- __le32 rss_hash;
- /* DW8 - carries filter_match only when rpa_en == 1 */
- /**
- * @filter_match: filter match value
- */
- __le32 filter_match;
- /* DW9 */
- /**
- * @rate_n_flags: RX rate/flags encoding
- */
- __le32 rate_n_flags;
- /* DW10 */
- /**
- * @energy_a: energy chain A
- */
- u8 energy_a;
- /**
- * @energy_b: energy chain B
- */
- u8 energy_b;
- /**
- * @channel: channel number
- */
- u8 channel;
- /**
- * @mac_context: MAC context mask
- */
- u8 mac_context;
- /* DW11 */
- /**
- * @gp2_on_air_rise: GP2 timer value on air rise (INA)
- */
- __le32 gp2_on_air_rise;
- /* DW12 & DW13 */
- /**
- * @tsf_on_air_rise:
- * TSF value on air rise (INA), only valid if
- * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
- */
- __le64 tsf_on_air_rise;
-} __packed;
+
+ union {
+ struct iwl_rx_mpdu_desc_v1 v1;
+ struct iwl_rx_mpdu_desc_v3 v3;
+ };
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
+#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
struct iwl_frame_release {
u8 baid;
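To make the v1/v3 split concrete, a hedged sketch of how a transport might size the descriptor that precedes each received frame, using the IWL_RX_DESC_SIZE_V1 helper added just above; the device-family cutoff is an assumption made here for illustration, not something taken from this patch.

/* Sketch only: older devices use just the v1 member of the union, so the
 * descriptor in the RX buffer is shorter there; 22560-class devices are
 * assumed to use the full struct.
 */
static inline size_t iwl_rx_mpdu_desc_size(const struct iwl_trans *trans)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		return sizeof(struct iwl_rx_mpdu_desc);

	return IWL_RX_DESC_SIZE_V1;
}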
@@ -587,4 +715,36 @@ struct iwl_ba_window_status_notif {
__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
+/**
+ * struct iwl_rfh_queue_data - per-queue RX configuration data
+ * @q_num: Q num
+ * @enable: enable queue
+ * @reserved: alignment
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @fr_bd_cb: DMA address of freeRB table
+ * @ur_bd_cb: DMA address of used RB table
+ * @fr_bd_wid: Initial index of the free table
+ */
+struct iwl_rfh_queue_data {
+ u8 q_num;
+ u8 enable;
+ __le16 reserved;
+ __le64 urbd_stts_wrptr;
+ __le64 fr_bd_cb;
+ __le64 ur_bd_cb;
+ __le32 fr_bd_wid;
+} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_config - RX queue configuration
+ * @num_queues: number of queues configured
+ * @reserved: alignment
+ * @data: DMA addresses per-queue
+ */
+struct iwl_rfh_queue_config {
+ u8 num_queues;
+ u8 reserved[3];
+ struct iwl_rfh_queue_data data[];
+} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
+
#endif /* __iwl_fw_api_rx_h__ */
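Because iwl_rfh_queue_config ends in a flexible data[] array, a host command built from it has to be sized by queue count. A minimal allocation sketch follows, assuming struct_size() from linux/overflow.h; the function name and its caller are hypothetical.

/* Sketch: allocate an RX queue configuration command for 'num' queues.
 * struct_size() accounts for the flexible data[] array at the end.
 */
static struct iwl_rfh_queue_config *iwl_alloc_rfh_queue_cmd(u8 num)
{
	struct iwl_rfh_queue_config *cmd;

	cmd = kzalloc(struct_size(cmd, data, num), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->num_queues = num;
	/* the caller fills cmd->data[i] with per-queue DMA addresses */
	return cmd;
}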
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index a2a40b515a3c..514b86123d3d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -320,6 +322,29 @@ struct iwl_tx_cmd_gen2 {
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_7 */
+/**
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @offload_assist: TX offload configuration
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @ttl: time to live - packet lifetime limit; the FW should drop the frame
+ *	if this time has passed.
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen3 {
+ __le16 len;
+ __le16 flags;
+ __le32 offload_assist;
+ struct iwl_dram_sec_info dram_info;
+ __le32 rate_n_flags;
+ __le64 ttl;
+ struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_8 */
+
/*
* TX response related data
*/
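As a rough illustration of the new gen3 layout, a sketch of filling the fixed part of the command for one frame; every value below is a placeholder (the real driver derives the flags, offload assist, rate and TTL from the skb and station state), and the meaning of a zero TTL is an assumption.

/* Sketch only: populate the fixed fields of a gen3 TX command. */
static void iwl_fill_tx_cmd_gen3_sketch(struct iwl_tx_cmd_gen3 *cmd,
					u16 payload_len, u32 rate_n_flags)
{
	cmd->len = cpu_to_le16(payload_len);
	cmd->flags = cpu_to_le16(0);		/* placeholder flags */
	cmd->offload_assist = cpu_to_le32(0);	/* no checksum offload */
	cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
	cmd->ttl = cpu_to_le64(0);		/* assumed: no lifetime limit */
	/* the 802.11 header follows immediately in hdr[0] */
}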
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
deleted file mode 100644
index 6f75985eea66..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include "iwl-drv.h"
-#include "runtime.h"
-#include "fw/api/commands.h"
-#include "fw/api/alive.h"
-
-static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data;
-
- IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n",
- __le32_to_cpu(fseq->aux_read_fseq_ver),
- __le32_to_cpu(fseq->wifi_fseq_ver));
-}
-
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
-
- switch (cmd) {
- case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF):
- iwl_fwrt_fseq_ver_mismatch(fwrt, rxb);
- break;
- default:
- break;
- }
-}
-IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fa283285fcbe..a31a42e673c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -243,39 +243,47 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
- /* Pull RXF1 */
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
- /* Pull RXF2 */
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
- RXF_DIFF_FROM_PREV, 1);
- /* Pull LMAC2 RXF1 */
- if (fwrt->smem_cfg.num_lmacs > 1)
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size,
- LMAC2_PRPH_OFFSET, 2);
-
- /* Pull TXF data from LMAC1 */
- for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
- /* Mark the number of TXF we're pulling now */
- iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
- iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
- 0, i);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+ /* Pull RXF1 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[0].rxfifo1_size, 0, 0);
+ /* Pull RXF2 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
+ RXF_DIFF_FROM_PREV, 1);
+ /* Pull LMAC2 RXF1 */
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[1].rxfifo1_size,
+ LMAC2_PRPH_OFFSET, 2);
}
- /* Pull TXF data from LMAC2 */
- if (fwrt->smem_cfg.num_lmacs > 1) {
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ /* Pull TXF data from LMAC1 */
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
- iwl_trans_write_prph(fwrt->trans,
- TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
- i);
+ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
iwl_fwrt_dump_txf(fwrt, dump_data,
- cfg->lmac[1].txfifo_size[i],
- LMAC2_PRPH_OFFSET,
- i + cfg->num_txfifo_entries);
+ cfg->lmac[0].txfifo_size[i], 0, i);
+ }
+
+ /* Pull TXF data from LMAC2 */
+ if (fwrt->smem_cfg.num_lmacs > 1) {
+ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
+ i++) {
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(fwrt->trans,
+ TXF_LARC_NUM +
+ LMAC2_PRPH_OFFSET, i);
+ iwl_fwrt_dump_txf(fwrt, dump_data,
+ cfg->lmac[1].txfifo_size[i],
+ LMAC2_PRPH_OFFSET,
+ i + cfg->num_txfifo_entries);
+ }
}
}
- if (fw_has_capa(&fwrt->fw->ucode_capa,
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
@@ -600,42 +608,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
fifo_data_len = 0;
- /* Count RXF2 size */
- if (mem_cfg->rxfifo2_size) {
- /* Add header info */
- fifo_data_len += mem_cfg->rxfifo2_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
-
- /* Count RXF1 sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- if (!mem_cfg->lmac[i].rxfifo1_size)
- continue;
-
- /* Add header info */
- fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
- /* Count TXF sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- int j;
+ /* Count RXF2 size */
+ if (mem_cfg->rxfifo2_size) {
+ /* Add header info */
+ fifo_data_len +=
+ mem_cfg->rxfifo2_size +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
- for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
- if (!mem_cfg->lmac[i].txfifo_size[j])
+ /* Count RXF1 sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ if (!mem_cfg->lmac[i].rxfifo1_size)
continue;
/* Add header info */
fifo_data_len +=
- mem_cfg->lmac[i].txfifo_size[j] +
+ mem_cfg->lmac[i].rxfifo1_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}
- if (fw_has_capa(&fwrt->fw->ucode_capa,
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ size_t fifo_const_len = sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+
+ /* Count TXF sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ int j;
+
+ for (j = 0; j < mem_cfg->num_txfifo_entries;
+ j++) {
+ if (!mem_cfg->lmac[i].txfifo_size[j])
+ continue;
+
+ /* Add header info */
+ fifo_data_len +=
+ fifo_const_len +
+ mem_cfg->lmac[i].txfifo_size[j];
+ }
+ }
+ }
+
+ if ((fwrt->fw->dbg_dump_mask &
+ BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;
i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
@@ -652,7 +672,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Make room for PRPH registers */
- if (!fwrt->trans->cfg->gen2) {
+ if (!fwrt->trans->cfg->gen2 &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
i++) {
/* The range includes both boundaries */
@@ -667,7 +688,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
if (!fwrt->trans->cfg->gen2 &&
- fwrt->trans->cfg->mq_rx_supported) {
+ fwrt->trans->cfg->mq_rx_supported &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
for (i = 0; i <
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
/* The range includes both boundaries */
@@ -681,34 +703,42 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
}
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
file_len = sizeof(*dump_file) +
- sizeof(*dump_data) * 3 +
- sizeof(*dump_smem_cfg) +
fifo_data_len +
prph_len +
- radio_len +
- sizeof(*dump_info);
-
- /* Make room for the SMEM, if it exists */
- if (smem_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
- /* Make room for the secondary SRAM, if it exists */
- if (sram2_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
- /* Make room for MEM segments */
- for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
- le32_to_cpu(fw_dbg_mem[i].len);
+ radio_len;
+
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+ file_len += sizeof(*dump_data) + sizeof(*dump_info);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+ file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
+
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ /* Make room for the SMEM, if it exists */
+ if (smem_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ smem_len;
+
+ /* Make room for the secondary SRAM, if it exists */
+ if (sram2_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ sram2_len;
+
+ /* Make room for MEM segments */
+ for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ le32_to_cpu(fw_dbg_mem[i].len);
+ }
}
/* Make room for fw's virtual image pages, if it exists */
- if (!fwrt->trans->cfg->gen2 &&
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block)
file_len += fwrt->num_of_paging_blk *
@@ -722,12 +752,14 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
- if (fwrt->dump.desc)
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ fwrt->dump.desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
- if (!fwrt->fw->n_dbg_mem_tlv)
- file_len += sram_len + sizeof(*dump_mem);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
+ !fwrt->fw->n_dbg_mem_tlv)
+ file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
dump_file = vzalloc(file_len);
if (!dump_file) {
@@ -740,48 +772,56 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
- dump_data->len = cpu_to_le32(sizeof(*dump_info));
- dump_info = (void *)dump_data->data;
- dump_info->device_family =
- fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
- dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
- memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
- sizeof(dump_info->fw_human_readable));
- strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
- sizeof(dump_info->dev_human_readable));
- strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
- sizeof(dump_info->bus_human_readable));
-
- dump_data = iwl_fw_error_next_data(dump_data);
-
- /* Dump shared memory configuration */
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
- dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
- dump_smem_cfg = (void *)dump_data->data;
- dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
- dump_smem_cfg->num_txfifo_entries =
- cpu_to_le32(mem_cfg->num_txfifo_entries);
- for (i = 0; i < MAX_NUM_LMAC; i++) {
- int j;
-
- for (j = 0; j < TX_FIFO_MAX_NUM; j++)
- dump_smem_cfg->lmac[i].txfifo_size[j] =
- cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]);
- dump_smem_cfg->lmac[i].rxfifo1_size =
- cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
- }
- dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
- dump_smem_cfg->internal_txfifo_addr =
- cpu_to_le32(mem_cfg->internal_txfifo_addr);
- for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
- dump_smem_cfg->internal_txfifo_size[i] =
- cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+ dump_data->len = cpu_to_le32(sizeof(*dump_info));
+ dump_info = (void *)dump_data->data;
+ dump_info->device_family =
+ fwrt->trans->cfg->device_family ==
+ IWL_DEVICE_FAMILY_7000 ?
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+ dump_info->hw_step =
+ cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+ memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
+ sizeof(dump_info->fw_human_readable));
+ strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+ sizeof(dump_info->dev_human_readable) - 1);
+ strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
+ sizeof(dump_info->bus_human_readable) - 1);
+
+ dump_data = iwl_fw_error_next_data(dump_data);
}
- dump_data = iwl_fw_error_next_data(dump_data);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+ /* Dump shared memory configuration */
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
+ dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
+ dump_smem_cfg = (void *)dump_data->data;
+ dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
+ dump_smem_cfg->num_txfifo_entries =
+ cpu_to_le32(mem_cfg->num_txfifo_entries);
+ for (i = 0; i < MAX_NUM_LMAC; i++) {
+ int j;
+ u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
+
+ for (j = 0; j < TX_FIFO_MAX_NUM; j++)
+ dump_smem_cfg->lmac[i].txfifo_size[j] =
+ cpu_to_le32(txf_size[j]);
+ dump_smem_cfg->lmac[i].rxfifo1_size =
+ cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
+ }
+ dump_smem_cfg->rxfifo2_size =
+ cpu_to_le32(mem_cfg->rxfifo2_size);
+ dump_smem_cfg->internal_txfifo_addr =
+ cpu_to_le32(mem_cfg->internal_txfifo_addr);
+ for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
+ dump_smem_cfg->internal_txfifo_size[i] =
+ cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+ }
+
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
/* We only dump the FIFOs if the FW is in error state */
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
@@ -790,7 +830,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
iwl_read_radio_regs(fwrt, &dump_data);
}
- if (fwrt->dump.desc) {
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
fwrt->dump.desc->len);
@@ -805,7 +846,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (monitor_dump_only)
goto dump_trans_data;
- if (!fwrt->fw->n_dbg_mem_tlv) {
+ if (!fwrt->fw->n_dbg_mem_tlv &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -821,6 +863,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
bool success;
+ if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
+ break;
+
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -854,7 +899,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (smem_len) {
+ if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
@@ -867,7 +912,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (sram2_len) {
+ if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
@@ -881,7 +926,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Dump fw's virtual image */
- if (!fwrt->trans->cfg->gen2 &&
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block) {
IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
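All of the new conditions above test the same pattern against dbg_dump_mask; a tiny hypothetical wrapper (not part of this patch) shows the intent in one place.

/* Hypothetical helper: is dump section 'type' requested by the firmware's
 * dump-list TLV? The mask defaults to all ones, so everything is dumped
 * unless the TLV restricts it.
 */
static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type)
{
	return fwrt->fw->dbg_dump_mask & BIT(type);
}

/* usage: if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) ... */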
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 9d939cbaf6c6..bbf2b265a06a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
+
+ /* TLVs 0x1000-0x2000 are for internal driver usage */
+ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
};
struct iwl_ucode_tlv {
@@ -318,7 +321,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
- * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
@@ -889,39 +892,4 @@ struct iwl_fw_dbg_conf_tlv {
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
-/**
- * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- * change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- * hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- * specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_fw_gscan_capabilities {
- __le32 max_scan_cache_size;
- __le32 max_scan_buckets;
- __le32 max_ap_cache_per_scan;
- __le32 max_rssi_sample_size;
- __le32 max_scan_reporting_threshold;
- __le32 max_hotlist_aps;
- __le32 max_significant_change_aps;
- __le32 max_bssid_history_entries;
- __le32 max_hotlist_ssids;
- __le32 max_number_epno_networks;
- __le32 max_number_epno_networks_by_ssid;
- __le32 max_number_of_white_listed_ssid;
- __le32 max_number_of_black_listed_ssid;
-} __packed;
-
#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index f4912382b6af..0861b97c4233 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -193,41 +193,6 @@ struct iwl_fw_cscheme_list {
} __packed;
/**
- * struct iwl_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- * change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- * hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- * specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_gscan_capabilities {
- u32 max_scan_cache_size;
- u32 max_scan_buckets;
- u32 max_ap_cache_per_scan;
- u32 max_rssi_sample_size;
- u32 max_scan_reporting_threshold;
- u32 max_hotlist_aps;
- u32 max_significant_change_aps;
- u32 max_bssid_history_entries;
- u32 max_hotlist_ssids;
- u32 max_number_epno_networks;
- u32 max_number_epno_networks_by_ssid;
- u32 max_number_of_white_listed_ssid;
- u32 max_number_of_black_listed_ssid;
-};
-
-/**
* enum iwl_fw_type - iwlwifi firmware type
* @IWL_FW_DVM: DVM firmware
* @IWL_FW_MVM: MVM firmware
@@ -298,7 +263,7 @@ struct iwl_fw {
size_t n_dbg_mem_tlv;
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
- struct iwl_gscan_capabilities gscan_capa;
+ u32 dbg_dump_mask;
};
static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index d8db1dd100b0..ed23367f7088 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -168,7 +168,4 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb);
-
#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index fb4b6442b4d7..ff85d69c2a8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -143,7 +145,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
return;
pkt = cmd.resp_pkt;
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
iwl_parse_shared_mem_22000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 84a816809723..12fddcf15bab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -93,6 +93,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
+ IWL_DEVICE_FAMILY_22560,
};
/*
@@ -176,6 +177,7 @@ static inline u8 num_of_ant(u8 mask)
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
+ * @max_tfd_queue_size: max number of entries in tfd queue.
*/
struct iwl_base_params {
unsigned int wd_timeout;
@@ -191,6 +193,7 @@ struct iwl_base_params {
scd_chain_ext_wa:1;
u16 num_of_queues; /* def: HW dependent */
+ u32 max_tfd_queue_size; /* def: HW dependent */
u8 max_ll_items;
u8 led_compensation;
@@ -571,9 +574,11 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
new file mode 100644
index 000000000000..ebea99189ca9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -0,0 +1,286 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_context_info_file_gen3_h__
+#define __iwl_context_info_file_gen3_h__
+
+#include "iwl-context-info.h"
+
+#define CSR_CTXT_INFO_BOOT_CTRL 0x0
+#define CSR_CTXT_INFO_ADDR 0x118
+#define CSR_IML_DATA_ADDR 0x120
+#define CSR_IML_SIZE_ADDR 0x128
+#define CSR_IML_RESP_ADDR 0x12c
+
+/* Set bit for enabling automatic function boot */
+#define CSR_AUTO_FUNC_BOOT_ENA BIT(1)
+/* Set bit for initiating function boot */
+#define CSR_AUTO_FUNC_INIT BIT(7)
+
+/**
+ * enum iwl_prph_scratch_mtr_format - tfd size configuration
+ * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
+ */
+enum iwl_prph_scratch_mtr_format {
+ IWL_PRPH_MTR_FORMAT_16B = 0x0,
+ IWL_PRPH_MTR_FORMAT_32B = 0x40000,
+ IWL_PRPH_MTR_FORMAT_64B = 0x80000,
+ IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
+};
+
+/**
+ * enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
+ * in hwm config.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
+ * multicomm.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
+ * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
+ * completion descriptor, 1 for responses (legacy)
+ * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
+ * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
+ * 3: 256 bit.
+ */
+enum iwl_prph_scratch_flags {
+ IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
+ IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
+ IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
+ IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
+ IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
+ IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
+ IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
+ IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
+};
+
+/*
+ * struct iwl_prph_scratch_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: prph scratch information version id
+ * @size: the size of the context information in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_version {
+ __le16 mac_id;
+ __le16 version;
+ __le16 size;
+ __le16 reserved;
+} __packed; /* PERIPH_SCRATCH_VERSION_S */
+
+/*
+ * struct iwl_prph_scratch_control - control structure
+ * @control_flags: context information flags see &enum iwl_prph_scratch_flags
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_control {
+ __le32 control_flags;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_CONTROL_S */
+
+/*
+ * struct iwl_prph_scratch_ror_cfg - ror config
+ * @ror_base_addr: ror start address
+ * @ror_size: ror size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_ror_cfg {
+ __le64 ror_base_addr;
+ __le32 ror_size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_hwm_cfg - hwm config
+ * @hwm_base_addr: hwm start address
+ * @hwm_size: hwm size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_hwm_cfg {
+ __le64 hwm_base_addr;
+ __le32 hwm_size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_rbd_cfg {
+ __le64 free_rbd_addr;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @ror_cfg: ror configuration
+ * @hwm_cfg: hwm configuration
+ * @rbd_cfg: default RX queue configuration
+ */
+struct iwl_prph_scratch_ctrl_cfg {
+ struct iwl_prph_scratch_version version;
+ struct iwl_prph_scratch_control control;
+ struct iwl_prph_scratch_ror_cfg ror_cfg;
+ struct iwl_prph_scratch_hwm_cfg hwm_cfg;
+ struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+
+/*
+ * struct iwl_prph_scratch - peripheral scratch mapping
+ * @ctrl_cfg: control and configuration of prph scratch
+ * @dram: firmware images addresses in DRAM
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch {
+ struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
+ __le32 reserved[16];
+ struct iwl_context_info_dram dram;
+} __packed; /* PERIPH_SCRATCH_S */
+
+/*
+ * struct iwl_prph_info - peripheral information
+ * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
+ * @ipc_status_mirror: reflects the value in the IPC Status CSR register
+ * @sleep_notif: indicates the peripheral sleep status
+ * @reserved: reserved
+ */
+struct iwl_prph_info {
+ __le32 boot_stage_mirror;
+ __le32 ipc_status_mirror;
+ __le32 sleep_notif;
+ __le32 reserved;
+} __packed; /* PERIPH_INFO_S */
+
+/*
+ * struct iwl_context_info_gen3 - device INIT configuration
+ * @version: version of the context information
+ * @size: size of context information in DWs
+ * @config: context in which the peripheral would execute - a subset of
+ * capability csr register published by the peripheral
+ * @prph_info_base_addr: the peripheral information structure start address
+ * @cr_head_idx_arr_base_addr: the completion ring head index array
+ * start address
+ * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
+ * start address
+ * @cr_tail_idx_arr_base_addr: the completion ring tail index array
+ * start address
+ * @tr_head_idx_arr_base_addr: the transfer ring head index array
+ * start address
+ * @cr_idx_arr_size: number of entries in the completion ring index array
+ * @tr_idx_arr_size: number of entries in the transfer ring index array
+ * @mtr_base_addr: the message transfer ring start address
+ * @mcr_base_addr: the message completion ring start address
+ * @mtr_size: number of entries which the message transfer ring can hold
+ * @mcr_size: number of entries which the message completion ring can hold
+ * @mtr_doorbell_vec: the doorbell vector associated with the message
+ * transfer ring
+ * @mcr_doorbell_vec: the doorbell vector associated with the message
+ * completion ring
+ * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a transfer descriptor in the message transfer ring
+ * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a completion descriptor in the message completion ring
+ * @mtr_opt_header_size: the size of the optional header in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mtr_opt_footer_size: the size of the optional footer in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mcr_opt_header_size: the size of the optional header in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @mcr_opt_footer_size: the size of the optional footer in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @msg_rings_ctrl_flags: message rings control flags
+ * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
+ * after updating the Peripheral Information structure
+ * @prph_scratch_base_addr: the peripheral scratch structure start address
+ * @prph_scratch_size: the size of the peripheral scratch structure in DWs
+ * @reserved: reserved
+ */
+struct iwl_context_info_gen3 {
+ __le16 version;
+ __le16 size;
+ __le32 config;
+ __le64 prph_info_base_addr;
+ __le64 cr_head_idx_arr_base_addr;
+ __le64 tr_tail_idx_arr_base_addr;
+ __le64 cr_tail_idx_arr_base_addr;
+ __le64 tr_head_idx_arr_base_addr;
+ __le16 cr_idx_arr_size;
+ __le16 tr_idx_arr_size;
+ __le64 mtr_base_addr;
+ __le64 mcr_base_addr;
+ __le16 mtr_size;
+ __le16 mcr_size;
+ __le16 mtr_doorbell_vec;
+ __le16 mcr_doorbell_vec;
+ __le16 mtr_msi_vec;
+ __le16 mcr_msi_vec;
+ u8 mtr_opt_header_size;
+ u8 mtr_opt_footer_size;
+ u8 mcr_opt_header_size;
+ u8 mcr_opt_footer_size;
+ __le16 msg_rings_ctrl_flags;
+ __le16 prph_info_msi_vec;
+ __le64 prph_scratch_base_addr;
+ __le32 prph_scratch_size;
+ __le32 reserved;
+} __packed; /* IPC_CONTEXT_INFO_S */
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ const struct fw_img *fw);
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_gen3_h__ */
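For illustration, a hedged sketch of composing the scratch control word from the flags above; choosing 256-bit TFDs and keeping the 2K RB default here is an assumption, and the surrounding gen3 init code is not shown.

/* Sketch: select completion-descriptor mode with 256-bit TFDs. The
 * IWL_PRPH_MTR_FORMAT_256B value is already positioned inside the
 * MTR_FORMAT field, so it can be OR'ed in directly.
 */
static void iwl_prph_scratch_set_control(struct iwl_prph_scratch *prph_scratch)
{
	u32 control_flags = IWL_PRPH_SCRATCH_MTR_MODE |
			    IWL_PRPH_MTR_FORMAT_256B;

	/* uncomment for 4K receive buffers instead of the 2K default */
	/* control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; */

	prph_scratch->ctrl_cfg.control.control_flags =
		cpu_to_le32(control_flags);
}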
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index b870c0986744..4b6fdf3b15fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -199,5 +201,8 @@ struct iwl_context_info {
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info_dram *ctxt_dram);
#endif /* __iwl_context_info_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index ba971d3946e2..9019de99f077 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -339,6 +339,9 @@ enum {
/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
+/* HW_RF CHIP STEP */
+#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
+
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -592,6 +595,8 @@ enum msix_fh_int_causes {
enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c59ce4f8a5ed..c0631255aee7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -402,35 +402,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
return 0;
}
-static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
- const u32 len)
-{
- struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
- struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
-
- capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
- capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
- capa->max_ap_cache_per_scan =
- le32_to_cpu(fw_capa->max_ap_cache_per_scan);
- capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
- capa->max_scan_reporting_threshold =
- le32_to_cpu(fw_capa->max_scan_reporting_threshold);
- capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
- capa->max_significant_change_aps =
- le32_to_cpu(fw_capa->max_significant_change_aps);
- capa->max_bssid_history_entries =
- le32_to_cpu(fw_capa->max_bssid_history_entries);
- capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids);
- capa->max_number_epno_networks =
- le32_to_cpu(fw_capa->max_number_epno_networks);
- capa->max_number_epno_networks_by_ssid =
- le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid);
- capa->max_number_of_white_listed_ssid =
- le32_to_cpu(fw_capa->max_number_of_white_listed_ssid);
- capa->max_number_of_black_listed_ssid =
- le32_to_cpu(fw_capa->max_number_of_black_listed_ssid);
-}
-
/*
* Gets uCode section from tlv.
*/
@@ -644,7 +615,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
u32 build, paging_mem_size;
int num_of_cpus;
bool usniffer_req = false;
- bool gscan_capa = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -1043,6 +1013,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
break;
}
+ case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
+ if (tlv_len != sizeof(u32)) {
+ IWL_ERR(drv,
+ "dbg lst mask size incorrect, skip\n");
+ break;
+ }
+
+ drv->fw.dbg_dump_mask =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ }
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
*usniffer_images = true;
iwl_store_ucode_sec(pieces, tlv_data,
@@ -1079,16 +1060,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
paging_mem_size;
break;
case IWL_UCODE_TLV_FW_GSCAN_CAPA:
- /*
- * Don't return an error in case of a shorter tlv_len
- * to enable loading of FW that has an old format
- * of GSCAN capabilities TLV.
- */
- if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities))
- break;
-
- iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
- gscan_capa = true;
+ /* ignored */
break;
case IWL_UCODE_TLV_FW_MEM_SEG: {
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
@@ -1153,19 +1125,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
- /*
- * If ucode advertises that it supports GSCAN but GSCAN
- * capabilities TLV is not present, or if it has an old format,
- * warn and continue without GSCAN.
- */
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
- !gscan_capa) {
- IWL_DEBUG_INFO(drv,
- "GSCAN is supported but capabilities TLV is unavailable\n");
- __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
- capa->_capa);
- }
-
return 0;
invalid_tlv_len:
@@ -1316,6 +1275,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+ /* dump all fw memory areas by default */
+ fw->dbg_dump_mask = 0xffffffff;
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
@@ -1787,7 +1748,8 @@ MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
MODULE_PARM_DESC(amsdu_size,
- "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)");
+ "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
+ "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
@@ -1856,3 +1818,7 @@ module_param_named(remove_when_gone,
0444);
MODULE_PARM_DESC(remove_when_gone,
"Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
+
+module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
+ S_IRUGO);
+MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 777f5df8a0c6..a4c96215933b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program;
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -767,7 +767,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
if ((cfg->mq_rx_supported &&
- iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) ||
+ iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 11789ffb6512..df0e9ffff706 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -434,13 +434,15 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* RXF to DRAM.
* Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
*/
-#define RFH_GEN_STATUS 0xA09808
+#define RFH_GEN_STATUS 0xA09808
+#define RFH_GEN_STATUS_GEN3 0xA07824
#define RBD_FETCH_IDLE BIT(29)
#define SRAM_DMA_IDLE BIT(30)
#define RXF_DMA_IDLE BIT(31)
/* DMA configuration */
-#define RFH_RXF_DMA_CFG 0xA09820
+#define RFH_RXF_DMA_CFG 0xA09820
+#define RFH_RXF_DMA_CFG_GEN3 0xA07880
/* RB size */
#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
#define RFH_RXF_DMA_RB_SIZE_POS 16
@@ -643,10 +645,13 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \
+ TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
@@ -753,7 +758,7 @@ struct iwl_tfh_tfd {
* For devices up to 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
- * For 22000 and on:
+ * For 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
@@ -762,4 +767,15 @@ struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;
+/**
+ * struct iwl_gen3_bc_tbl - scheduler byte count table gen3
+ * For 22560 and on:
+ * @tfd_offset: 0-12 - tx command byte count
+ * 12-13 - number of 64 byte chunks
+ * 14-16 - reserved
+ */
+struct iwl_gen3_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+} __packed;
+
#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index a7dd8a8cddf9..97072cf75bca 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -17,9 +18,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program;
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -31,6 +30,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,8 @@ enum iwl_amsdu_size {
IWL_AMSDU_4K = 1,
IWL_AMSDU_8K = 2,
IWL_AMSDU_12K = 3,
+ /* Add 2K at the end to avoid breaking current API */
+ IWL_AMSDU_2K = 4,
};
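Not part of the patch: since the amsdu_size module parameter values are user-visible ABI, IWL_AMSDU_2K is appended as 4 instead of being slotted in by size, which means ordinal comparisons on this enum no longer track buffer size. A minimal lookup sketch (helper name is illustrative):

static inline unsigned int iwl_amsdu_size_to_bytes(enum iwl_amsdu_size sz)
{
	switch (sz) {
	case IWL_AMSDU_2K:	return 2 * 1024;
	case IWL_AMSDU_4K:	return 4 * 1024;
	case IWL_AMSDU_8K:	return 8 * 1024;
	case IWL_AMSDU_12K:	return 12 * 1024;
	default:		return 0;	/* IWL_AMSDU_DEF: device dependent */
	}
}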
enum iwl_uapsd_disable {
@@ -144,6 +146,10 @@ struct iwl_mod_params {
bool lar_disable;
bool fw_monitor;
bool disable_11ac;
+ /**
+ * @disable_11ax: disable HE capabilities, default = false
+ */
+ bool disable_11ax;
bool remove_when_gone;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index b815ba38dbdb..b4c3a957c102 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -430,6 +430,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
else
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
+ case IWL_AMSDU_2K:
+ if (cfg->mq_rx_supported)
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ else
+ WARN(1, "RB size of 2K is not supported by this device\n");
+ break;
case IWL_AMSDU_4K:
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
@@ -463,6 +470,101 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
+static struct ieee80211_sband_iftype_data iwl_he_capa = {
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
+ .phy_cap_info[3] =
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
+ .phy_cap_info[4] =
+ IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
+ .phy_cap_info[5] =
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
+ .phy_cap_info[6] =
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+ .phy_cap_info[7] =
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_7,
+ .phy_cap_info[8] =
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
+ },
+ /*
+ * Set default Tx/Rx HE MCS NSS Support field. Indicate support
+ * for up to 2 spatial streams and all MCS, without any special
+ * cases
+ */
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ /*
+ * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
+ * to 7
+ */
+ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
+ },
+};
+
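Not part of the patch: the HE MCS maps above pack two bits per spatial stream (0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11, 3 = not supported), so 0xfffa advertises MCS 0-11 on streams 1-2 and marks streams 3-8 unsupported, while 0xffff disables 80+80 entirely. A small decode sketch (helper name is illustrative):

static inline u8 he_mcs_map_get(__le16 mcs_map, int nss /* 1-based */)
{
	return (le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 0x3;
}
/* he_mcs_map_get(cpu_to_le16(0xfffa), 1) == 2 (IEEE80211_HE_MCS_SUPPORT_0_11)
 * he_mcs_map_get(cpu_to_le16(0xfffa), 3) == 3 (IEEE80211_HE_MCS_NOT_SUPPORTED) */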
+static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
+ u8 tx_chains, u8 rx_chains)
+{
+ if (sband->band == NL80211_BAND_2GHZ ||
+ sband->band == NL80211_BAND_5GHZ)
+ sband->iftype_data = &iwl_he_capa;
+ else
+ return;
+
+ sband->n_iftype_data = 1;
+
+ /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
+ if ((tx_chains & rx_chains) != ANT_AB) {
+ iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
+ ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
+ iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
+ }
+}
+
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *nvm_ch_flags, u8 tx_chains,
@@ -483,6 +585,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
tx_chains, rx_chains);
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
@@ -495,6 +600,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
tx_chains, rx_chains);
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
@@ -1293,6 +1401,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
nvm->sku_cap_11n_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+ nvm->sku_cap_11ax_enable =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
nvm->sku_cap_band_24ghz_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
nvm->sku_cap_band_52ghz_enable =
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1b9c627ee34d..279dd7b7a3fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -350,6 +350,8 @@ static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
switch (rb_size) {
+ case IWL_AMSDU_2K:
+ return get_order(2 * 1024);
case IWL_AMSDU_4K:
return get_order(4 * 1024);
case IWL_AMSDU_8K:
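Not part of the patch: get_order() rounds the length up to a power-of-two number of pages, so with 4 KiB pages the new 2K case allocates the same single page as 4K. For example:

	/* assuming PAGE_SIZE == 4096 */
	iwl_trans_get_rb_size_order(IWL_AMSDU_2K);	/* get_order(2048)  == 0 */
	iwl_trans_get_rb_size_order(IWL_AMSDU_8K);	/* get_order(8192)  == 1 */
	iwl_trans_get_rb_size_order(IWL_AMSDU_12K);	/* get_order(12288) == 2 */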
@@ -438,6 +440,20 @@ struct iwl_trans_txq_scd_cfg {
};
/**
+ * struct iwl_trans_rxq_dma_data - RX queue DMA data
+ * @fr_bd_cb: DMA address of free BD cyclic buffer
+ * @fr_bd_wid: Initial write index of the free BD cyclic buffer
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @ur_bd_cb: DMA address of used BD cyclic buffer
+ */
+struct iwl_trans_rxq_dma_data {
+ u64 fr_bd_cb;
+ u32 fr_bd_wid;
+ u64 urbd_stts_wrptr;
+ u64 ur_bd_cb;
+};
+
+/**
* struct iwl_trans_ops - transport specific operations
*
* All the handlers MUST be implemented
@@ -557,6 +573,8 @@ struct iwl_trans_ops {
int cmd_id, int size,
unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
+ int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
@@ -753,6 +771,7 @@ struct iwl_trans {
const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
+ u32 dbg_dump_mask;
u8 dbg_dest_reg_num;
enum iwl_plat_pm_mode system_pm_mode;
@@ -945,6 +964,16 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
cfg, queue_wdg_timeout);
}
+static inline int
+iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
+ return -ENOTSUPP;
+
+ return trans->ops->rxq_dma_data(trans, queue, data);
+}
+
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 3fcf489f3120..79bdae994822 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1037,6 +1037,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif
+ /*
+ * TODO: this is needed because the firmware is not stopping
+ * the recording automatically before entering D3. This can
+ * be removed once the FW starts doing that.
+ */
+ iwl_fw_dbg_stop_recording(&mvm->fwrt);
+
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1c4178f20441..05b77419953c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1150,6 +1150,10 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc;
int bin_len = count / 2;
int ret = -EINVAL;
+ size_t mpdu_cmd_hdr_size =
+ (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
@@ -1168,7 +1172,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
goto out;
/* avoid invalid memory access */
- if (bin_len < sizeof(*pkt) + sizeof(*desc))
+ if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
goto out;
/* check this is RX packet */
@@ -1179,7 +1183,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
/* check the length in metadata matches actual received length */
desc = (void *)pkt->data;
if (le16_to_cpu(desc->mpdu_len) !=
- (bin_len - sizeof(*desc) - sizeof(*pkt)))
+ (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
goto out;
local_bh_disable();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 866c91c923be..6bb1a99a197a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -130,6 +130,41 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
+static int iwl_configure_rxq(struct iwl_mvm *mvm)
+{
+ int i, num_queues, size;
+ struct iwl_rfh_queue_config *cmd;
+
+ /* Do not configure default queue, it is configured via context info */
+ num_queues = mvm->trans->num_rx_queues - 1;
+
+ size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
+
+ cmd = kzalloc(size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->num_queues = num_queues;
+
+ for (i = 0; i < num_queues; i++) {
+ struct iwl_trans_rxq_dma_data data;
+
+ cmd->data[i].q_num = i + 1;
+ iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
+
+ cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+ cmd->data[i].urbd_stts_wrptr =
+ cpu_to_le64(data.urbd_stts_wrptr);
+ cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+ cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ RFH_QUEUE_CONFIG_CMD),
+ 0, size, cmd);
+}
+
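Not part of the patch: iwl_mvm_send_cmd_pdu() copies the payload into the command queue, so the kzalloc()'d buffer above could be freed once the call returns; as written it is not freed at all. A leak-free tail for the function, as a sketch:

	int ret = iwl_mvm_send_cmd_pdu(mvm,
				       WIDE_ID(DATA_PATH_GROUP,
					       RFH_QUEUE_CONFIG_CMD),
				       0, size, cmd);

	kfree(cmd);
	return ret;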
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
struct iwl_dqa_enable_cmd dqa_cmd = {
@@ -301,7 +336,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -1007,9 +1042,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Init RSS configuration */
- /* TODO - remove 22000 disablement when we have RXQ config API */
- if (iwl_mvm_has_new_rx_api(mvm) &&
- mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ ret = iwl_configure_rxq(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
+ ret);
+ goto error;
+ }
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 8ba16fc24e3a..b3fd20502abb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -780,6 +780,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a6e072234398..b15b0d84bb7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -36,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -914,7 +915,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
- u8 buf_size = params->buf_size;
+ u16 buf_size = params->buf_size;
bool amsdu = params->amsdu;
u16 timeout = params->timeout;
@@ -1897,6 +1898,194 @@ void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
iwl_mvm_mu_mimo_iface_iterator, notif);
}
+static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+ u8 byte_num = ppe_pos_bit / 8;
+ u8 bit_num = ppe_pos_bit % 8;
+ u8 residue_bits;
+ u8 res;
+
+ if (bit_num <= 5)
+ return (ppe[byte_num] >> bit_num) &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+ /*
+ * If bit_num > 5, we have to combine bits with next byte.
+ * Calculate how many bits we need to take from current byte (called
+ * here "residue_bits"), and add them to bits from next byte.
+ */
+
+ residue_bits = 8 - bit_num;
+
+ res = (ppe[byte_num + 1] &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+ residue_bits;
+ res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+ return res;
+}
+
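Not part of the patch, a worked example of the cross-byte case: each PPE threshold field is IEEE80211_PPE_THRES_INFO_PPET_SIZE (3) bits wide, so a field that starts after bit 5 straddles two bytes:

	u8 ppe[] = { 0x80, 0x03 };	/* bit 7 of ppe[0] and bits 0-1 of ppe[1] set */

	/* ppe_pos_bit = 7: bit_num = 7 > 5, residue_bits = 8 - 7 = 1
	 *   low bit   = (ppe[0] >> 7) & (BIT(1) - 1)     = 1
	 *   high bits = (ppe[1] & (BIT(3 - 1) - 1)) << 1 = 6
	 * iwl_mvm_he_get_ppe_val(ppe, 7) == 7
	 */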
+static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 sta_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+ .sta_id = sta_id,
+ .tid_limit = IWL_MAX_TID_COUNT,
+ .bss_color = vif->bss_conf.bss_color,
+ .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
+ .frame_time_rts_th =
+ cpu_to_le16(vif->bss_conf.frame_time_rts_th),
+ };
+ struct ieee80211_sta *sta;
+ u32 flags;
+ int i;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
+ if (IS_ERR(sta)) {
+ rcu_read_unlock();
+ WARN(1, "Can't find STA to configure HE\n");
+ return;
+ }
+
+ if (!sta->he_cap.has_he) {
+ rcu_read_unlock();
+ return;
+ }
+
+ flags = 0;
+
+ /* HTC flags */
+ if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+ IEEE80211_HE_MAC_CAP0_HTC_HE)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
+ if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+ (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+ u8 link_adap =
+ ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+ (sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+ if (link_adap == 2)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
+ else if (link_adap == 3)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
+ }
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
+
+ /* If PPE Thresholds exist, parse them into a FW-familiar format */
+ if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 ru_index_bitmap =
+ (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+ u8 *ppe = &sta->he_cap.ppe_thres[0];
+ u8 ppe_pos_bit = 7; /* Starting after PPE header */
+
+ /*
+ * FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
+ for (i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 bw;
+
+ for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
+ ru_index_tmp >>= 1;
+ if (!(ru_index_tmp & 1))
+ continue;
+
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ }
+ }
+
+ flags |= STA_CTXT_HE_PACKET_EXT;
+ }
+ rcu_read_unlock();
+
+	/* Mark MU EDCA as enabled, unless it's missing on some AC */
+ flags |= STA_CTXT_HE_MU_EDCA_CW;
+ for (i = 0; i < AC_NUM; i++) {
+ struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+ &mvmvif->queue_params[i].mu_edca_param_rec;
+
+ if (!mvmvif->queue_params[i].mu_edca) {
+ flags &= ~STA_CTXT_HE_MU_EDCA_CW;
+ break;
+ }
+
+ sta_ctxt_cmd.trig_based_txf[i].cwmin =
+ cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+ sta_ctxt_cmd.trig_based_txf[i].cwmax =
+ cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+ sta_ctxt_cmd.trig_based_txf[i].aifsn =
+ cpu_to_le16(mu_edca->aifsn);
+ sta_ctxt_cmd.trig_based_txf[i].mu_time =
+ cpu_to_le16(mu_edca->mu_edca_timer);
+ }
+
+ if (vif->bss_conf.multi_sta_back_32bit)
+ flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
+
+ if (vif->bss_conf.ack_enabled)
+ flags |= STA_CTXT_HE_ACK_ENABLED;
+
+ if (vif->bss_conf.uora_exists) {
+ flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
+
+ sta_ctxt_cmd.rand_alloc_ecwmin =
+ vif->bss_conf.uora_ocw_range & 0x7;
+ sta_ctxt_cmd.rand_alloc_ecwmax =
+ (vif->bss_conf.uora_ocw_range >> 3) & 0x7;
+ }
+
+ /* TODO: support Multi BSSID IE */
+
+ sta_ctxt_cmd.flags = cpu_to_le32(flags);
+
+ if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
+ DATA_PATH_GROUP, 0),
+ 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
+ IWL_ERR(mvm, "Failed to config FW to work HE!\n");
+}
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1910,8 +2099,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* beacon interval, which was not known when the station interface was
* added.
*/
- if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
+ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ if (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
+
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+ }
/*
* If we're not associated yet, take the (new) BSSID before associating
@@ -4216,7 +4410,7 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
if (mvmsta->avg_energy) {
sinfo->signal_avg = mvmsta->avg_energy;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
if (!fw_has_capa(&mvm->fw->ucode_capa,
@@ -4240,11 +4434,11 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
mvmvif->beacon_stats.accu_num_beacons;
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
if (mvmvif->beacon_stats.avg_signal) {
/* firmware only reports a value after RXing a few beacons */
sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
}
unlock:
mutex_unlock(&mvm->mutex);
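Not part of the patch: sinfo->filled is a u64 bitmap while BIT() expands to 1UL << n, which is only 32 bits wide on 32-bit architectures; BIT_ULL() keeps the shift in 64-bit space now that NL80211_STA_INFO_* indices reach 32 and beyond. The definitions are effectively:

	#define BIT(nr)		(1UL << (nr))	/* unsigned long, 32-bit on 32-bit arches */
	#define BIT_ULL(nr)	(1ULL << (nr))	/* always 64-bit, matches sinfo->filled */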
@@ -4364,13 +4558,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
- /* TODO - remove this when we have RXQ config API */
- if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
- qmask = BIT(0);
- if (notif->sync)
- atomic_set(&mvm->queue_sync_counter, 1);
- }
-
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 6a4ba160c59e..b3987a0a7018 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -654,7 +654,7 @@ struct iwl_mvm_tcm {
struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
- u8 buf_size;
+ u16 buf_size;
int queue;
u16 last_amsdu;
u8 last_sub_index;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ff1e518096c5..0e26619fb330 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -448,6 +448,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(STA_HE_CTXT_CMD),
+ HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -620,7 +622,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (iwl_mvm_has_new_rx_api(mvm)) {
op_mode->ops = &iwl_mvm_ops_mq;
- trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
+ trans->rx_mpdu_cmd_hdr_size =
+ (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
} else {
op_mode->ops = &iwl_mvm_ops;
trans->rx_mpdu_cmd_hdr_size =
@@ -703,11 +709,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
/* the hardware splits the A-MSDU */
- if (mvm->cfg->mq_rx_supported)
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ trans_cfg.rx_buf_size = IWL_AMSDU_2K;
+ /* TODO: remove when balanced power mode is fw supported */
+ iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
+ } else if (mvm->cfg->mq_rx_supported) {
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ }
trans->wide_cmd_header = true;
- trans_cfg.bc_table_dword = true;
+ trans_cfg.bc_table_dword =
+ mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
@@ -738,6 +750,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
sizeof(trans->dbg_conf_tlv));
trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+ trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -1003,10 +1016,8 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
schedule_work(&mvm->async_handlers_wk);
- return;
+ break;
}
-
- iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
}
static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index b8b2b819e8e7..8169d1450b3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -183,6 +183,43 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
}
}
+static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
+{
+ switch (mcs) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+ case IEEE80211_HE_MCS_NOT_SUPPORTED:
+ return 0;
+ }
+
+ WARN(1, "invalid HE MCS %d\n", mcs);
+ return 0;
+}
+
+static void
+rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
+ const struct ieee80211_sta_he_cap *he_cap,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+ int i;
+
+ for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
+ u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+ u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+
+ cmd->ht_rates[i][0] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+ cmd->ht_rates[i][1] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+ }
+}
+
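Not part of the patch: the two helpers above turn the 2-bit per-stream HE MCS support codes into per-MCS enable bitmaps for the firmware TLC command. Assuming IWL_TLC_MNG_HT_RATE_MCS11 == 11, a station with rx_mcs_80 == 0xfffa yields, for its first stream:

	u16 _mcs_80 = (0xfffa >> 0) & 0x3;	/* IEEE80211_HE_MCS_SUPPORT_0_11 (2) */

	rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80);
	/* == BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1 == 0x0fff: MCS 0-11 enabled */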
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
@@ -192,6 +229,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
unsigned long supp; /* must be unsigned long for for_each_set_bit */
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
/* non HT rates */
supp = 0;
@@ -202,7 +240,11 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->non_ht_rates = cpu_to_le16(supp);
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
- if (vht_cap && vht_cap->vht_supported) {
+ /* HT/VHT rates */
+ if (he_cap && he_cap->has_he) {
+ cmd->mode = IWL_TLC_MNG_MODE_HE;
+ rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+ } else if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap && ht_cap->ht_supported) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 642da10b0b7f..30cfd7d50bc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -363,7 +363,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx += 1;
if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
return idx;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK ||
+ rate_n_flags & RATE_MCS_HE_MSK) {
idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
idx += IWL_RATE_MCS_0_INDEX;
@@ -372,6 +373,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx++;
if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
return idx;
+ if ((rate_n_flags & RATE_MCS_HE_MSK) &&
+ (idx <= IWL_LAST_HE_RATE))
+ return idx;
} else {
/* legacy rate format, search for match in table */
@@ -516,6 +520,8 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
[LQ_HT_MIMO2] = "HT MIMO",
[LQ_VHT_SISO] = "VHT SISO",
[LQ_VHT_MIMO2] = "VHT MIMO",
+ [LQ_HE_SISO] = "HE SISO",
+ [LQ_HE_MIMO2] = "HE MIMO",
};
if (type < LQ_NONE || type >= LQ_MAX)
@@ -900,7 +906,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
/* Legacy */
if (!(ucode_rate & RATE_MCS_HT_MSK) &&
- !(ucode_rate & RATE_MCS_VHT_MSK)) {
+ !(ucode_rate & RATE_MCS_VHT_MSK) &&
+ !(ucode_rate & RATE_MCS_HE_MSK)) {
if (num_of_ant == 1) {
if (band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
@@ -911,7 +918,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
return 0;
}
- /* HT or VHT */
+ /* HT, VHT or HE */
if (ucode_rate & RATE_MCS_SGI_MSK)
rate->sgi = true;
if (ucode_rate & RATE_MCS_LDPC_MSK)
@@ -953,10 +960,24 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
} else {
WARN_ON_ONCE(1);
}
+ } else if (ucode_rate & RATE_MCS_HE_MSK) {
+ nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ rate->type = LQ_HE_SISO;
+ WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+ "stbc %d bfer %d", rate->stbc, rate->bfer);
+ } else if (nss == 2) {
+ rate->type = LQ_HE_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
}
WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
- !is_vht(rate));
+ !is_he(rate) && !is_vht(rate));
return 0;
}
@@ -3606,7 +3627,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
if (!(rate & RATE_MCS_HT_MSK) &&
- !(rate & RATE_MCS_VHT_MSK)) {
+ !(rate & RATE_MCS_VHT_MSK) &&
+ !(rate & RATE_MCS_HE_MSK)) {
int index = iwl_hwrate_to_plcp_idx(rate);
return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
@@ -3625,6 +3647,11 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
mcs = rate & RATE_HT_MCS_INDEX_MSK;
nss = ((rate & RATE_HT_MCS_NSS_MSK)
>> RATE_HT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_HE_MSK) {
+ type = "HE";
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+ >> RATE_VHT_MCS_NSS_POS) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}
@@ -3886,6 +3913,8 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
[IWL_RATE_MCS_7_INDEX] = "MCS7",
[IWL_RATE_MCS_8_INDEX] = "MCS8",
[IWL_RATE_MCS_9_INDEX] = "MCS9",
+ [IWL_RATE_MCS_10_INDEX] = "MCS10",
+ [IWL_RATE_MCS_11_INDEX] = "MCS11",
};
char *buff, *pos, *endpos;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index cffb8c852934..d2cf484e2b73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -144,8 +144,13 @@ enum {
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
+/*
+ * FIXME - various places in firmware API still use u8,
+ * e.g. LQ command and SCD config command.
+ * This should be 256 instead.
+ */
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
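Not part of the patch: HE block-ack sessions can span up to 256 frames, which is why buf_size/max_agg_bufsize are widened from u8 to u16 elsewhere in this series; per the FIXME above, firmware fields that are still u8 cap the gen2 limit at 255. A hedged sketch of the resulting clamp (field name illustrative):

	u8 fw_agg_limit = min_t(u16, buf_size,
				LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX);	/* 255 */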
@@ -162,6 +167,8 @@ enum iwl_table_type {
LQ_HT_MIMO2,
LQ_VHT_SISO, /* VHT types */
LQ_VHT_MIMO2,
+ LQ_HE_SISO, /* HE types */
+ LQ_HE_MIMO2,
LQ_MAX,
};
@@ -183,11 +190,16 @@ struct rs_rate {
#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
-#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
-#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
+#define is_type_he_siso(type) ((type) == LQ_HE_SISO)
+#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \
+ is_type_he_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \
+ is_type_vht_mimo2(type) || is_type_he_mimo2(type))
#define is_type_mimo(type) (is_type_mimo2(type))
#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type))
#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
@@ -201,6 +213,7 @@ struct rs_rate {
#define is_mimo(rate) is_type_mimo((rate)->type)
#define is_ht(rate) is_type_ht((rate)->type)
#define is_vht(rate) is_type_vht((rate)->type)
+#define is_he(rate) is_type_he((rate)->type)
#define is_a_band(rate) is_type_a_band((rate)->type)
#define is_g_band(rate) is_type_g_band((rate)->type)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 129c4c09648d..b53148f972a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -196,22 +198,31 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct sk_buff *skb, int queue,
struct ieee80211_sta *sta)
{
- if (iwl_mvm_check_pn(mvm, skb, queue, sta))
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
kfree_skb(skb);
- else
+ } else {
+ unsigned int radiotap_len = 0;
+
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he);
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
+ __skb_push(skb, radiotap_len);
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+ }
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
- struct iwl_rx_mpdu_desc *desc,
- struct ieee80211_rx_status *rx_status)
+ struct ieee80211_rx_status *rx_status,
+ u32 rate_n_flags, int energy_a,
+ int energy_b)
{
- int energy_a, energy_b, max_energy;
- u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
+ int max_energy;
+ u32 rate_flags = rate_n_flags;
- energy_a = desc->energy_a;
energy_a = energy_a ? -energy_a : S8_MIN;
- energy_b = desc->energy_b;
energy_b = energy_b ? -energy_b : S8_MIN;
max_energy = max(energy_a, energy_b);
@@ -356,7 +367,8 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
tid = IWL_MAX_TID_COUNT;
/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
- sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
dup_data->last_seq[tid] == hdr->seq_ctrl &&
@@ -850,17 +862,41 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
- struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
+ struct ieee80211_hdr *hdr;
u32 len = le16_to_cpu(desc->mpdu_len);
- u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
+ u32 rate_n_flags, gp2_on_air_rise;
u16 phy_info = le16_to_cpu(desc->phy_info);
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 crypt_len = 0;
+ u8 crypt_len = 0, channel, energy_a, energy_b;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 he_type = 0xffffffff;
+ /* this is invalid e.g. because puncture type doesn't allow 0b11 */
+#define HE_PHY_DATA_INVAL ((u64)-1)
+ u64 he_phy_data = HE_PHY_DATA_INVAL;
+ size_t desc_size;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ channel = desc->v3.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ energy_a = desc->v3.energy_a;
+ energy_b = desc->v3.energy_b;
+ desc_size = sizeof(*desc);
+ } else {
+ rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ channel = desc->v1.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+ energy_a = desc->v1.energy_a;
+ energy_b = desc->v1.energy_b;
+ desc_size = IWL_RX_DESC_SIZE_V1;
+ }
+
+ hdr = (void *)(pkt->data + desc_size);
/* Dont use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
*/
@@ -882,6 +918,51 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
+ if (rate_n_flags & RATE_MCS_HE_MSK) {
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+ };
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
+ };
+ unsigned int radiotap_len = 0;
+
+ he = skb_put_data(skb, &known, sizeof(known));
+ radiotap_len += sizeof(known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_type == RATE_MCS_HE_TYPE_MU) {
+ he_mu = skb_put_data(skb, &mu_known,
+ sizeof(mu_known));
+ radiotap_len += sizeof(mu_known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ }
+ }
+
+ /* temporarily hide the radiotap data */
+ __skb_pull(skb, radiotap_len);
+ }
+
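Not part of the patch: the radiotap HE template is appended to the still-empty skb, hidden with __skb_pull() so the rest of the RX path keeps seeing the 802.11 header at skb->data, and re-exposed in iwl_mvm_pass_packet_to_mac80211() (first hunk of this file) right before handing the frame to mac80211. In outline:

	he = skb_put_data(skb, &known, sizeof(known));	/* template at head of empty skb */
	__skb_pull(skb, sizeof(known));			/* hide it; the MPDU lands after it */
	/* ... fill *he while parsing the RX descriptor ... */
	__skb_push(skb, sizeof(known));			/* reveal it again */
	ieee80211_rx_napi(mvm->hw, sta, skb, napi);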
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
@@ -904,20 +985,80 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
- rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+ u64 tsf_on_air_rise;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
+ else
+ tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
+
+ rx_status->mactime = tsf_on_air_rise;
/* TSF as indicated by the firmware is at INA time */
rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ } else if (he_type == RATE_MCS_HE_TYPE_SU) {
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+ if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+ he_phy_data))
+ he->data3 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ mvm->ampdu_ref++;
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+ } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
}
- rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
- rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
- NL80211_BAND_2GHZ;
- rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
+ rx_status->device_timestamp = gp2_on_air_rise;
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
- iwl_mvm_get_signal_strength(mvm, desc, rx_status);
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
+ energy_b);
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->ampdu_reference = mvm->ampdu_ref;
@@ -925,6 +1066,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
+
+ if (he_phy_data != HE_PHY_DATA_INVAL &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |=
+ RX_FLAG_AMPDU_EOF_BIT;
+ }
}
}
@@ -1033,7 +1183,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
- /* Set up the HT phy flags */
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
break;
@@ -1048,6 +1197,70 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ if (rate_n_flags & RATE_MCS_HE_MSK &&
+ phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ /*
+ * Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the he_mu pointer,
+ * i.e. we don't have the phy data (due to the bits
+ * being used for TSF). This shouldn't happen though
+ * as management frames where we need the TSF/timers
+	 * are not transmitted in HE-MU, I think.
+ */
+ u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |=
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
+ if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+ } else if (he) {
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+ }
+
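Not part of the patch, a worked example of the RU mapping above: ru == 40 falls in the 37..52 range, so the frame is reported as a 52-tone allocation with offset 3 (the fourth 52-tone RU):

	/* ru = 40:
	 *   rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
	 *   offs = 40 - 37 = 3;
	 *   he->data2 |= le16_encode_bits(3, IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
	 */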
if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -1072,6 +1285,119 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
+ } else if (he) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+ if (rate_n_flags & RATE_MCS_BF_POS)
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+ RATE_MCS_HE_GI_LTF_POS) {
+ case 0:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case 1:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case 2:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case 3:
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ switch (he_type) {
+ case RATE_MCS_HE_TYPE_SU: {
+ u16 val;
+
+ /* LTF syms correspond to streams */
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ switch (rx_status->nss) {
+ case 1:
+ val = 0;
+ break;
+ case 2:
+ val = 1;
+ break;
+ case 3:
+ case 4:
+ val = 2;
+ break;
+ case 5:
+ case 6:
+ val = 3;
+ break;
+ case 7:
+ case 8:
+ val = 4;
+ break;
+ default:
+ WARN_ONCE(1, "invalid nss: %d\n",
+ rx_status->nss);
+ val = 0;
+ }
+ he->data5 |=
+ le16_encode_bits(val,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ }
+ break;
+ case RATE_MCS_HE_TYPE_MU: {
+ u16 val;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_phy_data == HE_PHY_DATA_INVAL)
+ break;
+
+ val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+ he_phy_data);
+
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data5 |=
+ cpu_to_le16(FIELD_PREP(
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
+ val));
+ }
+ break;
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ case RATE_MCS_HE_TYPE_TRIG:
+ /* not supported yet */
+ break;
+ }
} else {
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9263b9aa8b72..18db1ed92d9b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2184,7 +2184,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
struct iwl_mvm_baid_data *data,
- u16 ssn, u8 buf_size)
+ u16 ssn, u16 buf_size)
{
int i;
@@ -2211,7 +2211,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
@@ -2273,7 +2273,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (start) {
cmd.add_immediate_ba_tid = (u8) tid;
cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
- cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
+ cmd.rx_ba_window = cpu_to_le16(buf_size);
} else {
cmd.remove_immediate_ba_tid = (u8) tid;
}
@@ -2559,7 +2559,7 @@ out:
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1c43ea8dd8cc..0fc211108149 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -412,7 +412,7 @@ struct iwl_mvm_sta {
u32 tfd_queue_msk;
u32 mac_id_n_color;
u16 tid_disable_agg;
- u8 max_agg_bufsize;
+ u16 max_agg_bufsize;
enum iwl_sta_type sta_type;
enum ieee80211_sta_state sta_state;
bool bt_reduced_txpower;
@@ -518,11 +518,11 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index cf2591f2ac23..ff193dca2020 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -484,13 +484,15 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
/* Make sure we zero enough of dev_cmd */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
- struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = 0;
+ u32 rate_n_flags = 0;
+ u16 flags = 0;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -507,25 +509,43 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
- cmd->offload_assist |= cpu_to_le16(offload_assist);
+ if (!info->control.hw_key)
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
- /* Total # bytes to be transmitted */
- cmd->len = cpu_to_le16((u16)skb->len);
+ /* For data packets rate info comes from the fw */
+ if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+ }
- /* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
- if (!info->control.hw_key)
- cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
+ cmd->offload_assist |= cpu_to_le32(offload_assist);
- /* For data packets rate info comes from the fw */
- if (ieee80211_is_data(hdr->frame_control) && sta)
- goto out;
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
- cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
- cmd->rate_n_flags =
- cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+ cmd->flags = cpu_to_le16(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ } else {
+ struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+
+ cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+
+ cmd->flags = cpu_to_le32(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ }
goto out;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
new file mode 100644
index 000000000000..2146fda8da2f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info-gen3.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ const struct fw_img *fw)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_context_info_gen3 *ctxt_info_gen3;
+ struct iwl_prph_scratch *prph_scratch;
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+ struct iwl_prph_info *prph_info;
+ void *iml_img;
+ u32 control_flags = 0;
+ int ret;
+
+ /* Allocate prph scratch */
+ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
+ &trans_pcie->prph_scratch_dma_addr,
+ GFP_KERNEL);
+ if (!prph_scratch)
+ return -ENOMEM;
+
+ prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+
+ prph_sc_ctrl->version.version = 0;
+ prph_sc_ctrl->version.mac_id =
+ cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
+
+ control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
+ IWL_PRPH_SCRATCH_MTR_MODE |
+ (IWL_PRPH_MTR_FORMAT_256B &
+ IWL_PRPH_SCRATCH_MTR_FORMAT) |
+ IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
+ IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+
+ /* initialize RX default queue */
+ prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+ cpu_to_le64(trans_pcie->rxq->bd_dma);
+
+ /* Configure debug, for integration */
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+ prph_sc_ctrl->hwm_cfg.hwm_base_addr =
+ cpu_to_le64(trans_pcie->fw_mon_phys);
+ prph_sc_ctrl->hwm_cfg.hwm_size =
+ cpu_to_le32(trans_pcie->fw_mon_size);
+
+ /* allocate ucode sections in dram and set addresses */
+ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
+ if (ret) {
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_scratch),
+ prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ return ret;
+ }
+
+ /* Allocate prph information
+ * currently nothing is assigned to the prph info, but it will be
+ * assigned later */
+ prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
+ &trans_pcie->prph_info_dma_addr,
+ GFP_KERNEL);
+ if (!prph_info)
+ return -ENOMEM;
+
+ /* Allocate context info */
+ ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
+ sizeof(*ctxt_info_gen3),
+ &trans_pcie->ctxt_info_dma_addr,
+ GFP_KERNEL);
+ if (!ctxt_info_gen3)
+ return -ENOMEM;
+
+ ctxt_info_gen3->prph_info_base_addr =
+ cpu_to_le64(trans_pcie->prph_info_dma_addr);
+ ctxt_info_gen3->prph_scratch_base_addr =
+ cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
+ ctxt_info_gen3->prph_scratch_size =
+ cpu_to_le32(sizeof(*prph_scratch));
+ ctxt_info_gen3->cr_head_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+ ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+ ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
+ ctxt_info_gen3->cr_idx_arr_size =
+ cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
+ ctxt_info_gen3->tr_idx_arr_size =
+ cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+ ctxt_info_gen3->mtr_base_addr =
+ cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ ctxt_info_gen3->mcr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+ ctxt_info_gen3->mtr_size =
+ cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
+ ctxt_info_gen3->mcr_size =
+ cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+
+ trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
+ trans_pcie->prph_info = prph_info;
+ trans_pcie->prph_scratch = prph_scratch;
+
+ /* Allocate IML */
+ iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
+ &trans_pcie->iml_dma_addr, GFP_KERNEL);
+ if (!iml_img)
+ return -ENOMEM;
+
+ memcpy(iml_img, trans->iml, trans->iml_len);
+
+ iwl_enable_interrupts(trans);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_ADDR,
+ trans_pcie->ctxt_info_dma_addr);
+ iwl_write64(trans, CSR_IML_DATA_ADDR,
+ trans_pcie->iml_dma_addr);
+ iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
+ iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
+
+ return 0;
+}
+
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->ctxt_info_gen3)
+ return;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_dma_addr = 0;
+ trans_pcie->ctxt_info_gen3 = NULL;
+
+ iwl_pcie_ctxt_info_free_fw_img(trans);
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
+ trans_pcie->prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ trans_pcie->prph_scratch_dma_addr = 0;
+ trans_pcie->prph_scratch = NULL;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
+ trans_pcie->prph_info,
+ trans_pcie->prph_info_dma_addr);
+ trans_pcie->prph_info_dma_addr = 0;
+ trans_pcie->prph_info = NULL;
+}
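The init path above allocates several DMA-coherent buffers (prph scratch, prph info, context info, IML) but its later -ENOMEM returns leave the earlier buffers allocated; only the stop path calls the matching _free(). A minimal sketch of the usual goto-based unwind, reusing the field names from the function above but not claiming to be the upstream implementation, looks like this:

static int ctxt_info_gen3_alloc_sketch(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_info *prph_info;
	struct iwl_context_info_gen3 *ctxt_info_gen3;

	/* first allocation: nothing to unwind if it fails */
	prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
				       &trans_pcie->prph_info_dma_addr,
				       GFP_KERNEL);
	if (!prph_info)
		return -ENOMEM;

	/* second allocation: release the first one on failure */
	ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
					    sizeof(*ctxt_info_gen3),
					    &trans_pcie->ctxt_info_dma_addr,
					    GFP_KERNEL);
	if (!ctxt_info_gen3)
		goto err_free_prph_info;

	trans_pcie->prph_info = prph_info;
	trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
	return 0;

err_free_prph_info:
	dma_free_coherent(trans->dev, sizeof(*prph_info), prph_info,
			  trans_pcie->prph_info_dma_addr);
	return -ENOMEM;
}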
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 3fc4343581ee..b2cd7ef5fc3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,57 +57,6 @@
#include "internal.h"
#include "iwl-prph.h"
-static int iwl_pcie_get_num_sections(const struct fw_img *fw,
- int start)
-{
- int i = 0;
-
- while (start < fw->num_sec &&
- fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
- fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
- start++;
- i++;
- }
-
- return i;
-}
-
-static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
-{
- dram->block = dma_alloc_coherent(trans->dev, sec->len,
- &dram->physical,
- GFP_KERNEL);
- if (!dram->block)
- return -ENOMEM;
-
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
-
- return 0;
-}
-
-static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
- int i;
-
- if (!dram->fw) {
- WARN_ON(dram->fw_cnt);
- return;
- }
-
- for (i = 0; i < dram->fw_cnt; i++)
- dma_free_coherent(trans->dev, dram->fw[i].size,
- dram->fw[i].block, dram->fw[i].physical);
-
- kfree(dram->fw);
- dram->fw_cnt = 0;
- dram->fw = NULL;
-}
-
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
dram->paging = NULL;
}
-static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
- const struct fw_img *fw,
- struct iwl_context_info *ctxt_info)
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info_dram *ctxt_dram)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
- struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
if (WARN(dram->paging,
@@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
/* allocate ucode sections in dram and set addresses */
- ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
+ ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
if (ret) {
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
ctxt_info, trans_pcie->ctxt_info_dma_addr);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 8520523b91b4..562cc79288a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -828,19 +828,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
/* 22000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
#endif /* CONFIG_IWLMVM */
@@ -1003,6 +1016,10 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;
+ /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return 0;
+
/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
iwl_pcie_conf_msix_hw(trans_pcie);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 45ea32796cda..b63d44b7cd7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -3,6 +3,7 @@
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -17,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -45,6 +45,7 @@
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
+#include "iwl-drv.h"
/* We need 2 entries for the TX command and header, and another one might
* be needed for potential data in the SKB's head. The remaining ones can
@@ -59,6 +60,7 @@
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
+#define FIRST_RX_QUEUE 512
struct iwl_host_cmd;
@@ -71,6 +73,7 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
+ * @size: size used from the buffer
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -78,6 +81,7 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
+ u32 size;
};
/**
@@ -98,14 +102,121 @@ struct isr_statistics {
u32 unhandled;
};
+#define IWL_CD_STTS_OPTIMIZED_POS 0
+#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
+#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
+#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
+#define IWL_CD_STTS_WIFI_STATUS_POS 4
+#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
+
+/**
+ * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
+ * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
+ * In sniffer mode, when split is used, set in last CD completion. (RX)
+ * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
+ * all CD completions. (RX)
+ * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
+ */
+enum iwl_completion_desc_transfer_status {
+ IWL_CD_STTS_UNUSED,
+ IWL_CD_STTS_UNUSED_2,
+ IWL_CD_STTS_END_TRANSFER,
+ IWL_CD_STTS_OVERFLOW,
+ IWL_CD_STTS_ABORTED,
+ IWL_CD_STTS_ERROR,
+};
+
+/**
+ * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
+ * @IWL_CD_STTS_VALID: the packet is valid (RX)
+ * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
+ * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
+ * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
+ * @IWL_CD_STTS_DUP: duplicate packet (RX)
+ * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
+ * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
+ * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
+ * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
+ * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
+ * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
+ * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
+ * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
+ * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
+ * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
+ */
+enum iwl_completion_desc_wifi_status {
+ IWL_CD_STTS_VALID,
+ IWL_CD_STTS_FCS_ERR,
+ IWL_CD_STTS_SEC_KEY_ERR,
+ IWL_CD_STTS_DECRYPTION_ERR,
+ IWL_CD_STTS_DUP,
+ IWL_CD_STTS_ICV_MIC_ERR,
+ IWL_CD_STTS_INTERNAL_SNAP_ERR,
+ IWL_CD_STTS_SEC_PORT_FAIL,
+ IWL_CD_STTS_BA_OLD_SN,
+ IWL_CD_STTS_QOS_NULL,
+ IWL_CD_STTS_MAC_HDR_ERR,
+ IWL_CD_STTS_MAX_RETRANS,
+ IWL_CD_STTS_EX_LIFETIME,
+ IWL_CD_STTS_NOT_USED,
+ IWL_CD_STTS_REPLAY_ERR,
+};
+
+#define IWL_RX_TD_TYPE_MSK 0xff000000
+#define IWL_RX_TD_SIZE_MSK 0x00ffffff
+#define IWL_RX_TD_SIZE_2K BIT(11)
+#define IWL_RX_TD_TYPE 0
+
+/**
+ * struct iwl_rx_transfer_desc - transfer descriptor
+ * @type_n_size: buffer type (bit 0: external buff valid,
+ * bit 1: optional footer valid, bit 2-7: reserved)
+ * and buffer size
+ * @addr: ptr to free buffer start address
+ * @rbid: unique tag of the buffer
+ * @reserved: reserved
+ */
+struct iwl_rx_transfer_desc {
+ __le32 type_n_size;
+ __le64 addr;
+ __le16 rbid;
+ __le16 reserved;
+} __packed;
+
+#define IWL_RX_CD_SIZE 0xffffff00
+
+/**
+ * struct iwl_rx_completion_desc - completion descriptor
+ * @type: buffer type (bit 0: external buff valid,
+ * bit 1: optional footer valid, bit 2-7: reserved)
+ * @status: status of the completion
+ * @reserved1: reserved
+ * @rbid: unique tag of the received buffer
+ * @size: buffer size, masked by IWL_RX_CD_SIZE
+ * @reserved2: reserved
+ */
+struct iwl_rx_completion_desc {
+ u8 type;
+ u8 status;
+ __le16 reserved1;
+ __le16 rbid;
+ __le32 size;
+ u8 reserved2[22];
+} __packed;
+
/**
* struct iwl_rxq - Rx queue
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
+ * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
+ * @tr_tail: driver's pointer to the transmission ring tail buffer
+ * @tr_tail_dma: physical address of the buffer for the transmission ring tail
+ * @cr_tail: driver's pointer to the completion ring tail buffer
+ * @cr_tail_dma: physical address of the buffer for the completion ring tail
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
@@ -125,8 +236,16 @@ struct iwl_rxq {
int id;
void *bd;
dma_addr_t bd_dma;
- __le32 *used_bd;
+ union {
+ void *used_bd;
+ __le32 *bd_32;
+ struct iwl_rx_completion_desc *cd;
+ };
dma_addr_t used_bd_dma;
+ __le16 *tr_tail;
+ dma_addr_t tr_tail_dma;
+ __le16 *cr_tail;
+ dma_addr_t cr_tail_dma;
u32 read;
u32 write;
u32 free_count;
@@ -136,7 +255,7 @@ struct iwl_rxq {
struct list_head rx_free;
struct list_head rx_used;
bool need_update;
- struct iwl_rb_status *rb_stts;
+ void *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
struct napi_struct napi;
@@ -175,18 +294,36 @@ struct iwl_dma_ptr {
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
*/
-static inline int iwl_queue_inc_wrap(int index)
+static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
- return ++index & (TFD_QUEUE_SIZE_MAX - 1);
+ return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @rxq - the rxq to get the rb stts from
+ */
+static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ __le16 *rb_stts = rxq->rb_stts;
+
+ return READ_ONCE(*rb_stts);
+ } else {
+ struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+ return READ_ONCE(rb_stts->closed_rb_num);
+ }
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
*/
-static inline int iwl_queue_dec_wrap(int index)
+static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
- return --index & (TFD_QUEUE_SIZE_MAX - 1);
+ return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
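Both wrap helpers rely on max_tfd_queue_size being a power of two, so that masking with (size - 1) behaves like a modulo and also handles the underflow in the decrement case. A small stand-alone sketch of the same arithmetic (generic, not iwlwifi-specific):

#include <assert.h>

/* same wrap arithmetic as above, with a plain size argument */
static inline int queue_inc_wrap(int index, int size)
{
	return ++index & (size - 1);	/* size must be a power of two */
}

static inline int queue_dec_wrap(int index, int size)
{
	return --index & (size - 1);	/* -1 wraps back to size - 1 */
}

int main(void)
{
	assert(queue_inc_wrap(255, 256) == 0);	/* wraps to the start */
	assert(queue_dec_wrap(0, 256) == 255);	/* wraps back to the end */
	return 0;
}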
struct iwl_cmd_meta {
@@ -315,6 +452,18 @@ enum iwl_shared_irq_flags {
};
/**
+ * enum iwl_image_response_code - image response values
+ * @IWL_IMAGE_RESP_DEF: the default value of the register
+ * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
+ * @IWL_IMAGE_RESP_FAIL: iml reading failed
+ */
+enum iwl_image_response_code {
+ IWL_IMAGE_RESP_DEF = 0,
+ IWL_IMAGE_RESP_SUCCESS = 1,
+ IWL_IMAGE_RESP_FAIL = 2,
+};
+
+/**
* struct iwl_dram_data
* @physical: page phy pointer
* @block: pointer to the allocated block/page
@@ -347,6 +496,12 @@ struct iwl_self_init_dram {
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @ctxt_info: context information for FW self init
+ * @ctxt_info_gen3: context information for gen3 devices
+ * @prph_info: prph info for self init
+ * @prph_scratch: prph scratch for self init
+ * @prph_info_dma_addr: dma addr of prph info
+ * @prph_scratch_dma_addr: dma addr of prph scratch
* @ctxt_info_dma_addr: dma addr of context information
* @init_dram: DRAM data of firmware image (including paging).
* Context information addresses will be taken from here.
@@ -391,8 +546,16 @@ struct iwl_trans_pcie {
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
- struct iwl_context_info *ctxt_info;
+ union {
+ struct iwl_context_info *ctxt_info;
+ struct iwl_context_info_gen3 *ctxt_info_gen3;
+ };
+ struct iwl_prph_info *prph_info;
+ struct iwl_prph_scratch *prph_scratch;
dma_addr_t ctxt_info_dma_addr;
+ dma_addr_t prph_info_dma_addr;
+ dma_addr_t prph_scratch_dma_addr;
+ dma_addr_t iml_dma_addr;
struct iwl_self_init_dram init_dram;
struct iwl_trans *trans;
@@ -477,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
return (void *)trans->trans_specific;
}
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
+ struct msix_entry *entry)
+{
+ /*
+ * Before sending the interrupt the HW disables it to prevent
+ * a nested interrupt. This is done by writing 1 to the corresponding
+ * bit in the mask register. After handling the interrupt, it should be
+ * re-enabled by clearing this bit. This register is defined as
+ * write 1 clear (W1C) register, meaning that it's being clear
+ * by writing 1 to the bit.
+ */
+ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+}
+
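The comment above describes write-1-to-clear semantics; a toy user-space model of a W1C mask register (illustrative only, it captures just the W1C rule and not the actual CSR layout) behaves like this:

#include <assert.h>
#include <stdint.h>

/* toy model of a W1C (write-1-to-clear) register */
static uint32_t w1c_write(uint32_t reg, uint32_t val)
{
	return reg & ~val;	/* bits written as 1 are cleared, 0 is a no-op */
}

int main(void)
{
	uint32_t automask = 0x5;		/* bits 0 and 2 auto-masked by HW */

	automask = w1c_write(automask, 0x1);	/* re-enable vector 0 */
	assert(automask == 0x4);		/* bit 2 is still masked */
	return 0;
}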
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
@@ -504,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq);
/*****************************************************
* ICT - interrupt handling
@@ -588,6 +770,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
+#define IWL_NUM_OF_COMPLETION_RINGS 31
+#define IWL_NUM_OF_TRANSFER_RINGS 527
+
+static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
+ int start)
+{
+ int i = 0;
+
+ while (start < fw->num_sec &&
+ fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+ fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
+
+static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const struct fw_desc *sec,
+ struct iwl_dram_data *dram)
+{
+ dram->block = dma_alloc_coherent(trans->dev, sec->len,
+ &dram->physical,
+ GFP_KERNEL);
+ if (!dram->block)
+ return -ENOMEM;
+
+ dram->size = sec->len;
+ memcpy(dram->block, sec->data, sec->len);
+
+ return 0;
+}
+
+static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ int i;
+
+ if (!dram->fw) {
+ WARN_ON(dram->fw_cnt);
+ return;
+ }
+
+ for (i = 0; i < dram->fw_cnt; i++)
+ dma_free_coherent(trans->dev, dram->fw[i].size,
+ dram->fw[i].block, dram->fw[i].physical);
+
+ kfree(dram->fw);
+ dram->fw_cnt = 0;
+ dram->fw = NULL;
+}
+
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -660,7 +896,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
-static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
+static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
@@ -676,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
return txq->tfds + trans_pcie->tfd_size * idx;
}
+static inline const char *queue_name(struct device *dev,
+ struct iwl_trans_pcie *trans_p, int i)
+{
+ if (trans_p->shared_vec_mask) {
+ int vec = trans_p->shared_vec_mask &
+ IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+ if (i == 0)
+ return DRV_NAME ": shared IRQ";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i + vec);
+ }
+ if (i == 0)
+ return DRV_NAME ": default queue";
+
+ if (i == trans_p->alloc_vecs - 1)
+ return DRV_NAME ": exception";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i);
+}
+
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -730,9 +989,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
+ int index = iwl_pcie_get_cmd_index(q, i);
+ int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
+ int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
+
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
}
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
@@ -801,7 +1064,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_queue_space(const struct iwl_txq *q);
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -818,6 +1081,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif
+/* common functions that are used by gen3 transport */
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
+
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index d15f5ba2dc77..d017aa2a0a8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -18,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -37,6 +36,7 @@
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
+#include "iwl-context-info-gen3.h"
/******************************************************************************
*
@@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->cfg->mq_rx_supported) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ /* TODO: remove this for 22560 once fw does it */
+ iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+ return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+ RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+ } else if (trans->cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -209,7 +214,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->cfg->mq_rx_supported)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ iwl_write32(trans, HBUS_TARG_WRPTR,
+ (rxq->write_actual |
+ ((FIRST_RX_QUEUE + rxq->id) << 16)));
+ else if (trans->cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
@@ -233,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
}
}
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+ struct iwl_rxq *rxq,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ struct iwl_rx_transfer_desc *bd = rxq->bd;
+
+ bd[rxq->write].type_n_size =
+ cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+ ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+ bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+ bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+ } else {
+ __le64 *bd = rxq->bd;
+
+ bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ }
+}
+
/*
* iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
*/
@@ -254,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
spin_lock(&rxq->lock);
while (rxq->free_count) {
- __le64 *bd = (__le64 *)rxq->bd;
-
/* Get next free Rx buffer, remove from free list */
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
list);
@@ -264,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
/* 12 first bits are expected to be empty */
WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
/* Point to Rx buffer via next RBD in circular buffer */
- bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ iwl_pcie_restock_bd(trans, rxq, rxb);
rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
rxq->free_count--;
}
@@ -391,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
- struct iwl_rxq *rxq)
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
@@ -448,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
}
}
-static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
@@ -608,89 +634,174 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}
-static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- struct device *dev = trans->dev;
- int i;
- int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
+ struct iwl_rx_transfer_desc *rx_td;
- if (WARN_ON(trans_pcie->rxq))
- return -EINVAL;
+ if (use_rx_td)
+ return sizeof(*rx_td);
+ else
+ return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+ sizeof(__le32);
+}
- trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
- GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -EINVAL;
+static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct device *dev = trans->dev;
+ bool use_rx_td = (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560);
+ int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
+ if (rxq->bd)
+ dma_free_coherent(trans->dev,
+ free_size * rxq->queue_size,
+ rxq->bd, rxq->bd_dma);
+ rxq->bd_dma = 0;
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ use_rx_td ? sizeof(__le16) :
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->rb_stts_dma = 0;
+ rxq->rb_stts = NULL;
+
+ if (rxq->used_bd)
+ dma_free_coherent(trans->dev,
+ (use_rx_td ? sizeof(*rxq->cd) :
+ sizeof(__le32)) * rxq->queue_size,
+ rxq->used_bd, rxq->used_bd_dma);
+ rxq->used_bd_dma = 0;
+ rxq->used_bd = NULL;
+
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ return;
- spin_lock_init(&rba->lock);
+ if (rxq->tr_tail)
+ dma_free_coherent(dev, sizeof(__le16),
+ rxq->tr_tail, rxq->tr_tail_dma);
+ rxq->tr_tail_dma = 0;
+ rxq->tr_tail = NULL;
+
+ if (rxq->cr_tail)
+ dma_free_coherent(dev, sizeof(__le16),
+ rxq->cr_tail, rxq->cr_tail_dma);
+ rxq->cr_tail_dma = 0;
+ rxq->cr_tail = NULL;
+}
- for (i = 0; i < trans->num_rx_queues; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+ int i;
+ int free_size;
+ bool use_rx_td = (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560);
- spin_lock_init(&rxq->lock);
- if (trans->cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
- else
- rxq->queue_size = RX_QUEUE_SIZE;
+ spin_lock_init(&rxq->lock);
+ if (trans->cfg->mq_rx_supported)
+ rxq->queue_size = MQ_RX_TABLE_SIZE;
+ else
+ rxq->queue_size = RX_QUEUE_SIZE;
- /*
- * Allocate the circular buffer of Read Buffer Descriptors
- * (RBDs)
- */
- rxq->bd = dma_zalloc_coherent(dev,
- free_size * rxq->queue_size,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err;
+ free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
- if (trans->cfg->mq_rx_supported) {
- rxq->used_bd = dma_zalloc_coherent(dev,
- sizeof(__le32) *
- rxq->queue_size,
- &rxq->used_bd_dma,
- GFP_KERNEL);
- if (!rxq->used_bd)
- goto err;
- }
+ /*
+ * Allocate the circular buffer of Read Buffer Descriptors
+ * (RBDs)
+ */
+ rxq->bd = dma_zalloc_coherent(dev,
+ free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err;
- /*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma,
+ if (trans->cfg->mq_rx_supported) {
+ rxq->used_bd = dma_zalloc_coherent(dev,
+ (use_rx_td ?
+ sizeof(*rxq->cd) :
+ sizeof(__le32)) *
+ rxq->queue_size,
+ &rxq->used_bd_dma,
GFP_KERNEL);
- if (!rxq->rb_stts)
+ if (!rxq->used_bd)
goto err;
}
+
+ /* Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
+ sizeof(__le16) :
+ sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma,
+ GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err;
+
+ if (!use_rx_td)
+ return 0;
+
+ /* Allocate the driver's pointer to TR tail */
+ rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+ &rxq->tr_tail_dma,
+ GFP_KERNEL);
+ if (!rxq->tr_tail)
+ goto err;
+
+ /* Allocate the driver's pointer to CR tail */
+ rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+ &rxq->cr_tail_dma,
+ GFP_KERNEL);
+ if (!rxq->cr_tail)
+ goto err;
+ /*
+ * W/A for a 22560 device step-Z0 bug: the value must be non-zero
+ * TODO: remove this when we stop supporting step Z0
+ */
+ *rxq->cr_tail = cpu_to_le16(500);
+
return 0;
err:
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- if (rxq->bd)
- dma_free_coherent(dev, free_size * rxq->queue_size,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
-
- if (rxq->used_bd)
- dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
- rxq->used_bd, rxq->used_bd_dma);
- rxq->used_bd_dma = 0;
- rxq->used_bd = NULL;
+ iwl_pcie_free_rxq_dma(trans, rxq);
}
kfree(trans_pcie->rxq);
return -ENOMEM;
}
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i, ret;
+
+ if (WARN_ON(trans_pcie->rxq))
+ return -EINVAL;
+
+ trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq)
+ return -EINVAL;
+
+ spin_lock_init(&rba->lock);
+
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -792,6 +903,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
int i;
switch (trans_pcie->rx_buf_size) {
+ case IWL_AMSDU_2K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_2K;
+ break;
case IWL_AMSDU_4K:
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
break;
@@ -872,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
iwl_pcie_enable_rx_wake(trans, true);
}
-static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
lockdep_assert_held(&rxq->lock);
@@ -882,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
rxq->used_count = 0;
}
-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
WARN_ON(1);
return 0;
@@ -931,7 +1045,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = 0;
rxq->write = 0;
rxq->write_actual = 0;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+ memset(rxq->rb_stts, 0,
+ (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(__le16) : sizeof(struct iwl_rb_status));
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1002,8 +1118,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
int i;
/*
@@ -1022,27 +1136,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- if (rxq->bd)
- dma_free_coherent(trans->dev,
- free_size * rxq->queue_size,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- else
- IWL_DEBUG_INFO(trans,
- "Free rxq->rb_stts which is NULL\n");
-
- if (rxq->used_bd)
- dma_free_coherent(trans->dev,
- sizeof(__le32) * rxq->queue_size,
- rxq->used_bd, rxq->used_bd_dma);
- rxq->used_bd_dma = 0;
- rxq->used_bd = NULL;
+ iwl_pcie_free_rxq_dma(trans, rxq);
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
@@ -1202,6 +1296,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ break;
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
@@ -1236,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
+static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
+ struct iwl_rxq *rxq, int i)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_mem_buffer *rxb;
+ u16 vid;
+
+ if (!trans->cfg->mq_rx_supported) {
+ rxb = rxq->queue[i];
+ rxq->queue[i] = NULL;
+ return rxb;
+ }
+
+ /* used_bd entries are 32/16 bits wide, but only 12 bits carry the vid */
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+ else
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+
+ if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ goto out_err;
+
+ rxb = trans_pcie->global_table[vid - 1];
+ if (rxb->invalid)
+ goto out_err;
+
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+
+ rxb->invalid = true;
+
+ return rxb;
+
+out_err:
+ WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
+ iwl_force_nmi(trans);
+ return NULL;
+}
+
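iwl_pcie_get_rxb() treats the vid reported by the device as untrusted: it is masked to 12 bits, bounds-checked against the global table and cross-checked against the rxb's invalid flag before the buffer is handed back. A generic, stand-alone sketch of that validation pattern (the names and table size here are illustrative, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TABLE_SIZE 512

struct rx_buf {
	bool invalid;	/* true while the buffer is owned by the driver */
};

static struct rx_buf global_table[TABLE_SIZE];

/* validate a 1-based index reported by hardware before dereferencing it */
static struct rx_buf *lookup_rxb(unsigned int vid)
{
	vid &= 0x0FFF;				/* only 12 bits carry the vid */

	if (!vid || vid > TABLE_SIZE)
		return NULL;			/* out of range: bad HW value */

	if (global_table[vid - 1].invalid)
		return NULL;			/* not owned by HW any more */

	global_table[vid - 1].invalid = true;	/* take ownership back */
	return &global_table[vid - 1];
}

int main(void)
{
	printf("vid 0 -> %p\n", (void *)lookup_rxb(0));	/* rejected */
	printf("vid 1 -> %p\n", (void *)lookup_rxb(1));	/* accepted once */
	printf("vid 1 -> %p\n", (void *)lookup_rxb(1));	/* rejected: already taken */
	return 0;
}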
/*
* iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
@@ -1250,7 +1385,7 @@ restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
@@ -1266,30 +1401,9 @@ restart:
if (unlikely(rxq->used_count == rxq->queue_size / 2))
emergency = true;
- if (trans->cfg->mq_rx_supported) {
- /*
- * used_bd is a 32 bit but only 12 are used to retrieve
- * the vid
- */
- u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
-
- if (WARN(!vid ||
- vid > ARRAY_SIZE(trans_pcie->global_table),
- "Invalid rxb index from HW %u\n", (u32)vid)) {
- iwl_force_nmi(trans);
- goto out;
- }
- rxb = trans_pcie->global_table[vid - 1];
- if (WARN(rxb->invalid,
- "Invalid rxb from HW %u\n", (u32)vid)) {
- iwl_force_nmi(trans);
- goto out;
- }
- rxb->invalid = true;
- } else {
- rxb = rxq->queue[i];
- rxq->queue[i] = NULL;
- }
+ rxb = iwl_pcie_get_rxb(trans, rxq, i);
+ if (!rxb)
+ goto out;
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
@@ -1331,6 +1445,9 @@ restart:
out:
/* Backtrack one entry */
rxq->read = i;
+ /* update cr tail with the rxq read pointer */
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ *rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
/*
@@ -1362,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
- struct msix_entry *entry)
-{
- /*
- * Before sending the interrupt the HW disables it to prevent
- * a nested interrupt. This is done by writing 1 to the corresponding
- * bit in the mask register. After handling the interrupt, it should be
- * re-enabled by clearing this bit. This register is defined as
- * write 1 clear (W1C) register, meaning that it's being clear
- * by writing 1 to the bit.
- */
- iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
-}
-
/*
* iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
* This interrupt handler should be used with RSS queue only.
@@ -1970,7 +2073,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
@@ -1995,8 +2099,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- /* uCode wakes up after power-down sleep */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
+ inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
+ /* Reflect IML transfer status */
+ int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+
+ IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
+ if (res == IWL_IMAGE_RESP_FAIL) {
+ isr_stats->sw++;
+ iwl_pcie_irq_handle_error(trans);
+ }
+ } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ /* uCode wakes up after power-down sleep */
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
iwl_pcie_rxq_check_wrptr(trans);
iwl_pcie_txq_check_wrptrs(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index b8e8dac2895d..2bc67219ed3e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -53,6 +53,7 @@
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
+#include "iwl-context-info-gen3.h"
#include "internal.h"
/*
@@ -188,7 +189,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
}
iwl_pcie_ctxt_info_free_paging(trans);
- iwl_pcie_ctxt_info_free(trans);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ iwl_pcie_ctxt_info_gen3_free(trans);
+ else
+ iwl_pcie_ctxt_info_free(trans);
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -346,7 +350,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- ret = iwl_pcie_ctxt_info_init(trans, fw);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
+ else
+ ret = iwl_pcie_ctxt_info_init(trans, fw);
if (ret)
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7229991ae70d..7d319b6863fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -84,6 +84,7 @@
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
+#include "fw/dbg.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -203,7 +204,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
trans_pcie->fw_mon_size = 0;
}
-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page *page = NULL;
@@ -1132,21 +1133,44 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
+static struct iwl_causes_list causes_list_v2[] = {
+ {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
+ {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
+ {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
+ {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
+ {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
+ {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
+ {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
+ {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
+ {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
+ {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+ {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+ {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+ {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i;
+ int i, arr_size =
+ (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
/*
* Access all non RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
- for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
- iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
- iwl_clear_bit(trans, causes_list[i].mask_reg,
- causes_list[i].cause_num);
+ for (i = 0; i < arr_size; i++) {
+ struct iwl_causes_list *causes =
+ (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ causes_list : causes_list_v2;
+
+ iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
+ iwl_clear_bit(trans, causes[i].mask_reg,
+ causes[i].cause_num);
}
}
@@ -1539,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_enable_rx_wake(trans, true);
- /*
- * Reconfigure IVAR table in case of MSIX or reset ict table in
- * MSI mode since HW reset erased it.
- * Also enables interrupts - none will happen as
- * the device doesn't know we're waking it up, only when
- * the opmode actually tells it after this call.
- */
- iwl_pcie_conf_msix_hw(trans_pcie);
- if (!trans_pcie->msix_enabled)
- iwl_pcie_reset_ict(trans);
- iwl_enable_interrupts(trans);
-
iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_mac_access_req));
iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1568,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}
+ /*
+ * Reconfigure IVAR table in case of MSIX or reset ict table in
+ * MSI mode since HW reset erased it.
+ * Also enables interrupts - none will happen as
+ * the device doesn't know we're waking it up, only when
+ * the opmode actually tells it after this call.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+ if (!trans_pcie->msix_enabled)
+ iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
+
iwl_pcie_set_pwr(trans, false);
if (!reset) {
@@ -1685,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
}
}
-static const char *queue_name(struct device *dev,
- struct iwl_trans_pcie *trans_p, int i)
-{
- if (trans_p->shared_vec_mask) {
- int vec = trans_p->shared_vec_mask &
- IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-
- if (i == 0)
- return DRV_NAME ": shared IRQ";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ": queue %d", i + vec);
- }
- if (i == 0)
- return DRV_NAME ": default queue";
-
- if (i == trans_p->alloc_vecs - 1)
- return DRV_NAME ": exception";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ": queue %d", i);
-}
-
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie)
{
@@ -2236,12 +2237,28 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (TFD_QUEUE_SIZE_MAX - 1),
+ (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (TFD_QUEUE_SIZE_MAX - 1),
+ (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
+static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+ return -EINVAL;
+
+ data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
+ data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
+ data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
+ data->fr_bd_wid = 0;
+
+ return 0;
+}
+
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2522,10 +2539,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
rxq->free_count);
if (rxq->rb_stts) {
+ u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
+ rxq));
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) &
- 0x0FFF);
+ r & 0x0FFF);
} else {
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: Not Allocated\n");
@@ -2731,7 +2749,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
spin_lock(&rxq->lock);
- r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
@@ -2934,11 +2952,12 @@ static struct iwl_trans_dump_data
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
- u32 len, num_rbs;
+ u32 len, num_rbs = 0;
u32 monitor_len;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->cfg->mq_rx_supported;
+ !trans->cfg->mq_rx_supported &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
/* transport dump header */
len = sizeof(*dump_data);
@@ -2990,6 +3009,10 @@ static struct iwl_trans_dump_data
}
if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+ if (!(trans->dbg_dump_mask &
+ BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
+ return NULL;
+
dump_data = vzalloc(len);
if (!dump_data)
return NULL;
@@ -3002,22 +3025,28 @@ static struct iwl_trans_dump_data
}
/* CSR registers */
- len += sizeof(*data) + IWL_CSR_TO_DUMP;
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += sizeof(*data) + IWL_CSR_TO_DUMP;
/* FH registers */
- if (trans->cfg->gen2)
- len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
- else
- len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+ if (trans->cfg->gen2)
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND_GEN2 -
+ FH_MEM_LOWER_BOUND_GEN2);
+ else
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND -
+ FH_MEM_LOWER_BOUND);
+ }
if (dump_rbs) {
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
- & 0x0FFF;
+ num_rbs =
+ le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+ & 0x0FFF;
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
sizeof(struct iwl_fw_error_dump_rb) +
@@ -3025,7 +3054,8 @@ static struct iwl_trans_dump_data
}
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2)
+ if (trans->cfg->gen2 &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -3037,41 +3067,51 @@ static struct iwl_trans_dump_data
len = 0;
data = (void *)dump_data->data;
- data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
- txcmd = (void *)data->data;
- spin_lock_bh(&cmdq->lock);
- ptr = cmdq->write_ptr;
- for (i = 0; i < cmdq->n_window; i++) {
- u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
- u32 caplen, cmdlen;
-
- cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
- trans_pcie->tfd_size * ptr);
- caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
-
- if (cmdlen) {
- len += sizeof(*txcmd) + caplen;
- txcmd->cmdlen = cpu_to_le32(cmdlen);
- txcmd->caplen = cpu_to_le32(caplen);
- memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
- txcmd = (void *)((u8 *)txcmd->data + caplen);
+
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+ u16 tfd_size = trans_pcie->tfd_size;
+
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->write_ptr;
+ for (i = 0; i < cmdq->n_window; i++) {
+ u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u32 caplen, cmdlen;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+ cmdq->tfds +
+ tfd_size * ptr);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd,
+ caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_queue_dec_wrap(trans, ptr);
}
+ spin_unlock_bh(&cmdq->lock);
- ptr = iwl_queue_dec_wrap(ptr);
+ data->len = cpu_to_le32(len);
+ len += sizeof(*data);
+ data = iwl_fw_error_next_data(data);
}
- spin_unlock_bh(&cmdq->lock);
- data->len = cpu_to_le32(len);
- len += sizeof(*data);
- data = iwl_fw_error_next_data(data);
-
- len += iwl_trans_pcie_dump_csr(trans, &data);
- len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += iwl_trans_pcie_dump_csr(trans, &data);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+ len += iwl_trans_pcie_fh_regs_dump(trans, &data);
if (dump_rbs)
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2) {
+ if (trans->cfg->gen2 &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
dma_addr_t addr =
@@ -3091,8 +3131,8 @@ static struct iwl_trans_dump_data
len += sizeof(*data) + sizeof(*paging) + page_len;
}
}
-
- len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+ len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
dump_data->len = len;
@@ -3187,6 +3227,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
.txq_free = iwl_trans_pcie_dyn_txq_free,
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
+ .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -3349,14 +3390,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
- if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) {
+
+ if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
- if (hw_status & UMAG_GEN_HW_IS_FPGA)
- trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
- else
+ if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
+ /*
+ * b step fw is the same for physical card and fpga
+ */
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
+ CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
+ } else {
+ /*
+ * a step no FPGA
+ */
trans->cfg = &iwl22000_2ac_cfg_hr;
+ }
}
#endif
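
The trans.c hunks above make each firmware error-dump section conditional on a bit in trans->dbg_dump_mask (TXCMD, CSR, FH registers, paging and the firmware monitor), so a section is sized and emitted only when it was requested. Below is a minimal standalone sketch of that gating pattern; the section names and sizes are invented for illustration and are not the driver's real structures.

/*
 * Standalone sketch of the dump-mask gating used in the trans.c hunks
 * above: a section contributes to the total length and is emitted only
 * when its bit is set. Section names and sizes are made up.
 */
#include <stdint.h>
#include <stdio.h>

enum { DUMP_TXCMD, DUMP_CSR, DUMP_FH_REGS, DUMP_PAGING, DUMP_MONITOR };

static const char *names[] = { "TXCMD", "CSR", "FH_REGS", "PAGING", "MONITOR" };

int main(void)
{
	uint32_t dump_mask = (1u << DUMP_TXCMD) | (1u << DUMP_CSR);
	size_t len = 0;

	for (int i = 0; i < 5; i++) {
		if (!(dump_mask & (1u << i)))
			continue;	/* section not requested, skip it */
		len += 64;		/* stand-in for the real section size */
		printf("collect %s\n", names[i]);
	}
	printf("total %zu bytes\n", len);
	return 0;
}
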
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 48890a1c825f..b99f33ff9123 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,6 +52,7 @@
*****************************************************************************/
#include <linux/pm_runtime.h>
#include <net/tso.h>
+#include <linux/tcp.h>
#include "iwl-debug.h"
#include "iwl-csr.h"
@@ -84,16 +87,20 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+ struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
__le16 bc_ent;
- len = DIV_ROUND_UP(len, 4);
+ if (trans_pcie->bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
return;
@@ -111,7 +118,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+ else
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
/*
@@ -355,52 +365,89 @@ out_err:
return -EINVAL;
}
-static
-struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
dma_addr_t tb_phys;
- bool amsdu;
- int i, len, tb1_len, tb2_len, hdr_len;
+ int len;
void *tb1_addr;
- memset(tfd, 0, sizeof(*tfd));
+ tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+ len + IWL_FIRST_TB_SIZE,
+ hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int i, len, tb1_len, tb2_len;
+ void *tb1_addr;
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+
/* The first TB points to bi-directional DMA data */
- if (!amsdu)
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
- /* there must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
/*
* The second TB (tb1) points to the remainder of the TX command
* and the 802.11 header - dword aligned size
* (This calculation modifies the TX command, so do it before the
* setup of the first TB)
*/
- len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
- ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
- /* do not align A-MSDU to dword as the subframe header aligns it */
- if (amsdu)
- tb1_len = len;
- else
- tb1_len = ALIGN(len, 4);
+ tb1_len = ALIGN(len, 4);
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -409,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- if (amsdu) {
- if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
- tb1_len + IWL_FIRST_TB_SIZE,
- hdr_len, dev_cmd))
- goto out_err;
-
- /*
- * building the A-MSDU might have changed this data, so memcpy
- * it now
- */
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
- return tfd;
- }
-
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
@@ -467,13 +497,50 @@ out_err:
return NULL;
}
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ len = sizeof(struct iwl_tx_cmd_gen2);
+ else
+ len = sizeof(struct iwl_tx_cmd_gen3);
+
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (amsdu)
+ return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+
+ return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len);
+}
+
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ u16 cmd_len;
int idx;
void *tfd;
@@ -488,11 +555,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(txq) < txq->high_mark) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(txq) < 3)) {
+ if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -526,7 +605,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
iwl_pcie_gen2_get_num_tbs(trans, tfd));
/* start timer if queue currently empty */
@@ -538,7 +617,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
@@ -650,7 +729,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
- if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -787,7 +866,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
iwl_trans_ref(trans);
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -954,7 +1033,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -1062,6 +1141,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
if (!txq)
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+ (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1113,7 +1195,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txq[qid] = txq;
- wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);
+ wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
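
The iwl_pcie_gen2_update_byte_tbl() hunk earlier in this file packs the frame length and the fetch-chunk count into a single 16-bit byte-count entry (len | (num_fetch_chunks << 12)), and the WARN_ON rejects lengths that do not fit the 12-bit field. The following standalone sketch models that packing outside the driver; the example values are arbitrary.

/*
 * Standalone model of the 16-bit byte-count entry built in
 * iwl_pcie_gen2_update_byte_tbl(): 12 bits of length in the low bits,
 * the fetch-chunk count in bits 12..15.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t pack_bc_ent(uint16_t len, uint8_t num_fetch_chunks)
{
	if (len > 0xFFF)	/* mirrors the driver's WARN_ON(len > 0xFFF) */
		return 0;
	return (uint16_t)(len | ((uint16_t)(num_fetch_chunks & 0xF) << 12));
}

int main(void)
{
	/* e.g. 300 bytes with one fetch chunk beyond the first */
	printf("bc_ent = 0x%04x\n", pack_bc_ent(300, 1));
	return 0;
}
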
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 473fe7ccb07c..93f0d387688a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,27 +71,28 @@
*
***************************************************/
-int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
/*
* To avoid ambiguity between empty and completely full queues, there
- * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
- * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
- if (q->n_window < TFD_QUEUE_SIZE_MAX)
+ if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
max = q->n_window;
else
- max = TFD_QUEUE_SIZE_MAX - 1;
+ max = trans->cfg->base_params->max_tfd_queue_size - 1;
/*
- * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
- * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
*/
- used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->cfg->base_params->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
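
The hunk above swaps the compile-time TFD_QUEUE_SIZE_MAX for the per-device max_tfd_queue_size. The arithmetic stays valid because the size remains a power of two, so masking with size - 1 acts as a modulo even after write_ptr wraps below read_ptr. A standalone sketch of the free-space calculation, ignoring the n_window cap that the real function also applies:

/*
 * Standalone model of iwl_queue_space(): for a power-of-two queue size,
 * (write - read) & (size - 1) gives the number of used slots even after
 * the indices wrap, and one slot stays reserved to tell empty from full.
 */
#include <stdio.h>

static unsigned int queue_space(unsigned int write, unsigned int read,
				unsigned int size)	/* size: power of two */
{
	unsigned int max = size - 1;			/* one entry reserved */
	unsigned int used = (write - read) & (size - 1);

	return used > max ? 0 : max - used;
}

int main(void)
{
	/* write_ptr has wrapped past the end while read_ptr has not yet */
	printf("%u free\n", queue_space(3, 250, 256));
	return 0;
}
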
@@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = trans_pcie->tfd_size *
+ trans->cfg->base_params->max_tfd_queue_size;
size_t tb0_buf_sz;
int i;
@@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
int ret;
+ u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
txq->need_update = false;
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ /* max_tfd_queue_size must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
/* Initialize queue's high/low-water marks, and head/tail indexes */
ret = iwl_queue_init(txq, slots_num);
@@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ trans_pcie->tfd_size *
+ trans->cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
@@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
- u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_gen3_bc_tbl) :
+ sizeof(struct iwlagn_scd_bc_tbl);
/*It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
@@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
- scd_bc_tbls_size);
+ bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
goto error;
@@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
- int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
+ int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
+ int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue*/
@@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
}
- if (txq->read_ptr == tfd_num)
+ if (read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
@@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(tfd_num);
+ last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
+ __func__, txq_id, last_to_free,
+ trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
for (;
- txq->read_ptr != tfd_num;
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
- int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb = txq->entries[idx].skb;
+ read_ptr != tfd_num;
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
+ read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+ struct sk_buff *skb = txq->entries[read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__skb_queue_tail(skbs, skb);
- txq->entries[idx].skb = NULL;
+ txq->entries[read_ptr].skb = NULL;
if (!trans->cfg->use_tfh)
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
@@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(txq) > txq->low_mark &&
+ if (iwl_queue_space(trans, txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
}
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(txq) > txq->low_mark)
+ if (iwl_queue_space(trans, txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
@@ -1225,23 +1236,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_txq *txq = trans_pcie->txq[txq_id];
unsigned long flags;
int nfreed = 0;
+ u16 r;
lockdep_assert_held(&txq->lock);
- if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
+ idx = iwl_pcie_get_cmd_index(txq, idx);
+ r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
+ if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+ (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
+ __func__, txq_id, idx,
+ trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+ for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
+ r = iwl_queue_inc_wrap(trans, r)) {
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, txq->write_ptr, txq->read_ptr);
+ idx, txq->write_ptr, r);
iwl_force_nmi(trans);
}
}
@@ -1555,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1711,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -2311,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(txq) < txq->high_mark) {
+ if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(txq) < 3)) {
+ if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2444,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
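
Many hunks in this file now pass trans into iwl_queue_inc_wrap()/iwl_queue_dec_wrap(), since the wrap point is the runtime max_tfd_queue_size rather than a constant. Assuming the size is a power of two, which the WARN_ONCE added in iwl_pcie_txq_init() enforces, wrap-around increment and decrement reduce to a mask, roughly as in this standalone sketch (not the driver's actual helpers):

/*
 * Standalone sketch of wrap-around index arithmetic with a runtime,
 * power-of-two queue size, as assumed by the tx.c hunks above.
 */
#include <stdio.h>

static unsigned int inc_wrap(unsigned int idx, unsigned int size)
{
	return (idx + 1) & (size - 1);
}

static unsigned int dec_wrap(unsigned int idx, unsigned int size)
{
	return (idx - 1) & (size - 1);
}

int main(void)
{
	printf("%u %u\n", inc_wrap(255, 256), dec_wrap(0, 256));	/* 0 255 */
	return 0;
}
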
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index d1884b8913e7..0094b1d2b577 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -66,7 +66,7 @@ static void prism2_send_mgmt(struct net_device *dev,
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int ap_debug_proc_show(struct seq_file *m, void *v)
{
struct ap_data *ap = PDE_DATA(file_inode(m->file));
@@ -81,8 +81,7 @@ static int ap_debug_proc_show(struct seq_file *m, void *v)
seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
{
@@ -990,7 +989,7 @@ static void prism2_send_mgmt(struct net_device *dev,
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
+#ifdef CONFIG_PROC_FS
static int prism2_sta_proc_show(struct seq_file *m, void *v)
{
struct sta_info *sta = m->private;
@@ -1059,6 +1058,7 @@ static int prism2_sta_proc_show(struct seq_file *m, void *v)
return 0;
}
+#endif
static void handle_add_proc_queue(struct work_struct *work)
{
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 2720aa39f530..ad1aa65fee7f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -151,13 +151,6 @@ static int prism2_get_ram_size(local_info_t *local);
#define HFA384X_MAGIC 0x8A32
#endif
-
-static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
-{
- return HFA384X_INW(reg);
-}
-
-
static void hfa384x_read_regs(struct net_device *dev,
struct hfa384x_regs *regs)
{
@@ -2897,7 +2890,12 @@ static void hostap_tick_timer(struct timer_list *t)
}
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
+static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
+{
+ return HFA384X_INW(reg);
+}
+
static int prism2_registers_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -2951,8 +2949,7 @@ static int prism2_registers_proc_show(struct seq_file *m, void *v)
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
struct set_tim_data {
struct list_head list;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 5b33ccab9188..703d74cea3c2 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -11,8 +11,7 @@
#define PROC_LIMIT (PAGE_SIZE - 80)
-
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int prism2_debug_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -43,9 +42,9 @@ static int prism2_debug_proc_show(struct seq_file *m, void *v)
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
+#ifdef CONFIG_PROC_FS
static int prism2_stats_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -82,6 +81,7 @@ static int prism2_stats_proc_show(struct seq_file *m, void *v)
return 0;
}
+#endif
static int prism2_wds_proc_show(struct seq_file *m, void *v)
{
@@ -174,6 +174,7 @@ static const struct seq_operations prism2_bss_list_proc_seqops = {
.show = prism2_bss_list_proc_show,
};
+#ifdef CONFIG_PROC_FS
static int prism2_crypt_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -190,6 +191,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
}
return 0;
}
+#endif
static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
size_t count, loff_t *_pos)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 18e819d964f1..998dfac0fcff 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2,6 +2,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -2517,6 +2518,123 @@ out_err:
nlmsg_free(mcast_skb);
}
+static const struct ieee80211_sband_iftype_data he_capa_2ghz = {
+ /* TODO: should we support other types, e.g., P2P?*/
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes unset, as
+ * DCM, beam forming, RU and PPE threshold information
+ * are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xffff),
+ .tx_mcs_160 = cpu_to_le16(0xffff),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ },
+};
+
+static const struct ieee80211_sband_iftype_data he_capa_5ghz = {
+ /* TODO: should we support other types, e.g., P2P?*/
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes unset, as
+ * DCM, beam forming, RU and PPE threshold information
+ * are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+};
+
+static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband)
+{
+ if (sband->band == NL80211_BAND_2GHZ)
+ sband->iftype_data =
+ (struct ieee80211_sband_iftype_data *)&he_capa_2ghz;
+ else if (sband->band == NL80211_BAND_5GHZ)
+ sband->iftype_data =
+ (struct ieee80211_sband_iftype_data *)&he_capa_5ghz;
+ else
+ return;
+
+ sband->n_iftype_data = 1;
+}
+
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
{
@@ -2678,6 +2796,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband = &data->bands[band];
+
+ sband->band = band;
+
switch (band) {
case NL80211_BAND_2GHZ:
sband->channels = data->channels_2ghz;
@@ -2734,6 +2855,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.rx_mask[1] = 0xff;
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ mac80211_hswim_he_capab(sband);
+
hw->wiphy->bands[band] = sband;
}
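
mac80211_hswim_he_capab() above attaches one iftype_data entry per band, and its types_mask advertises which interface types (station and AP here) the HE capabilities apply to, so a consumer picks the entry whose mask contains the interface type it is interested in. A small standalone model of that lookup follows; the types and fields are simplified stand-ins, not the mac80211 structures.

/*
 * Standalone model of an iftype_data lookup: each entry carries a
 * bitmask of the interface types it applies to. Simplified stand-in
 * types, not the mac80211 definitions.
 */
#include <stdio.h>

enum iftype { IFTYPE_STATION = 1, IFTYPE_AP = 2, IFTYPE_P2P_CLIENT = 3 };

struct iftype_data {
	unsigned int types_mask;	/* one bit per supported interface type */
	int has_he;
};

static const struct iftype_data *find_iftype_data(const struct iftype_data *arr,
						  int n, enum iftype type)
{
	for (int i = 0; i < n; i++)
		if (arr[i].types_mask & (1u << type))
			return &arr[i];
	return NULL;	/* no HE data advertised for this interface type */
}

int main(void)
{
	struct iftype_data he = {
		.types_mask = (1u << IFTYPE_STATION) | (1u << IFTYPE_AP),
		.has_he = 1,
	};

	printf("station: %d\n", find_iftype_data(&he, 1, IFTYPE_STATION) != NULL);
	printf("p2p:     %d\n", find_iftype_data(&he, 1, IFTYPE_P2P_CLIENT) != NULL);
	return 0;
}
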
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index f99031cfdf86..57edfada0665 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -1559,10 +1559,10 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
int ret;
size_t i;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_RX_BYTES) |
- BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->tx_bytes = priv->dev->stats.tx_bytes;
sinfo->tx_packets = priv->dev->stats.tx_packets;
sinfo->rx_bytes = priv->dev->stats.rx_bytes;
@@ -1572,14 +1572,14 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
ret = lbs_get_rssi(priv, &signal, &noise);
if (ret == 0) {
sinfo->signal = signal;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
/* Convert priv->cur_rate from hw_value to NL80211 value */
for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
if (priv->cur_rate == lbs_rates[i].hw_value) {
sinfo->txrate.legacy = lbs_rates[i].bitrate;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
break;
}
}
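
The cfg.c hunk switches the sinfo->filled updates from BIT() to BIT_ULL() because the filled bitmap is 64 bits wide; on a 32-bit build BIT(n) is only as wide as unsigned long, so any station-info attribute index of 32 or above would be lost. A standalone illustration, with the two macros written out in essentially the kernel's form:

/*
 * Standalone illustration of BIT() vs BIT_ULL(): BIT() is as wide as
 * unsigned long, so on an LP32 target BIT(35) would shift past the type
 * width (undefined behaviour), while BIT_ULL(35) is always 64-bit.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

int main(void)
{
	uint64_t filled = 0;

	filled |= BIT_ULL(35);		/* well defined on every build */
	printf("filled = 0x%016llx\n", (unsigned long long)filled);
	/* BIT(35) would only be safe where unsigned long is 64 bits wide */
	return 0;
}
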
diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
index dd1ee1f0af48..469134930026 100644
--- a/drivers/net/wireless/marvell/libertas/dev.h
+++ b/drivers/net/wireless/marvell/libertas/dev.h
@@ -104,6 +104,7 @@ struct lbs_private {
u8 fw_ready;
u8 surpriseremoved;
u8 setup_fw_on_resume;
+ u8 power_up_on_resume;
int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
void (*reset_card) (struct lbs_private *priv);
int (*power_save) (struct lbs_private *priv);
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 2300e796c6ab..43743c26c071 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func)
static int if_sdio_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- int ret;
struct if_sdio_card *card = sdio_get_drvdata(func);
+ struct lbs_private *priv = card->priv;
+ int ret;
mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
+ priv->power_up_on_resume = false;
/* If we're powered off anyway, just let the mmc layer remove the
* card. */
- if (!lbs_iface_active(card->priv))
- return -ENOSYS;
+ if (!lbs_iface_active(priv)) {
+ if (priv->fw_ready) {
+ priv->power_up_on_resume = true;
+ if_sdio_power_off(card);
+ }
+
+ return 0;
+ }
dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
sdio_func_id(func), flags);
@@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev)
/* If we aren't being asked to wake on anything, we should bail out
* and let the SD stack power down the card.
*/
- if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+ if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
dev_info(dev, "Suspend without wake params -- powering down card\n");
- return -ENOSYS;
+ if (priv->fw_ready) {
+ priv->power_up_on_resume = true;
+ if_sdio_power_off(card);
+ }
+
+ return 0;
}
if (!(flags & MMC_PM_KEEP_POWER)) {
@@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev)
if (ret)
return ret;
- ret = lbs_suspend(card->priv);
+ ret = lbs_suspend(priv);
if (ret)
return ret;
@@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev)
dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
+ if (card->priv->power_up_on_resume) {
+ if_sdio_power_on(card);
+ wait_event(card->pwron_waitq, card->priv->fw_ready);
+ }
+
ret = lbs_resume(card->priv);
return ret;
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index ffea610f67e2..c67a8e7be310 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -614,6 +614,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
struct if_usb_card *cardp,
struct lbs_private *priv)
{
+ unsigned long flags;
u8 i;
if (recvlength > LBS_CMD_BUFFER_SIZE) {
@@ -623,9 +624,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
return;
}
- BUG_ON(!in_interrupt());
-
- spin_lock(&priv->driver_lock);
+ spin_lock_irqsave(&priv->driver_lock, flags);
i = (priv->resp_idx == 0) ? 1 : 0;
BUG_ON(priv->resp_len[i]);
@@ -635,7 +634,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
kfree_skb(skb);
lbs_notify_command_response(priv, i);
- spin_unlock(&priv->driver_lock);
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
lbs_deb_usbd(&cardp->udev->dev,
"Wake up main thread to handle cmd response\n");
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 5153922e7ce1..e92fc5001171 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -603,6 +603,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
struct if_usb_card *cardp,
struct lbtf_private *priv)
{
+ unsigned long flags;
+
if (recvlength > LBS_CMD_BUFFER_SIZE) {
lbtf_deb_usbd(&cardp->udev->dev,
"The receive buffer is too large\n");
@@ -610,14 +612,12 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
return;
}
- BUG_ON(!in_interrupt());
-
- spin_lock(&priv->driver_lock);
+ spin_lock_irqsave(&priv->driver_lock, flags);
memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
recvlength - MESSAGE_HEADER_LEN);
kfree_skb(skb);
lbtf_cmd_response_rx(priv);
- spin_unlock(&priv->driver_lock);
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
}
/**
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 5d75c971004b..e2addd8b878b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,10 +696,11 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
"Send delba to tid=%d, %pM\n",
tid, rx_reor_tbl_ptr->ta);
mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
- goto exit;
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
+ return;
}
}
-exit:
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 7ab44cd32a9d..8e63d14c1e1c 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,6 +103,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
* There could be holes in the buffer, which are skipped by the function.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -111,25 +113,21 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
{
int pkt_to_send, i;
void *rx_tmp_ptr;
- unsigned long flags;
pkt_to_send = (start_win > tbl->start_win) ?
min((start_win - tbl->start_win), tbl->win_size) :
tbl->win_size;
for (i = 0; i < pkt_to_send; ++i) {
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
rx_tmp_ptr = NULL;
if (tbl->rx_reorder_ptr[i]) {
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
}
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
if (rx_tmp_ptr)
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -140,7 +138,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
}
tbl->start_win = start_win;
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
/*
@@ -150,6 +147,8 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* The start window is adjusted automatically when a hole is located.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -157,21 +156,15 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
{
int i, j, xchg;
void *rx_tmp_ptr;
- unsigned long flags;
for (i = 0; i < tbl->win_size; ++i) {
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
- if (!tbl->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+ if (!tbl->rx_reorder_ptr[i])
break;
- }
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -184,7 +177,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
}
}
tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
/*
@@ -192,6 +184,8 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
*
* The function stops the associated timer and dispatches all the
* pending packets in the Rx reorder table before deletion.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -217,11 +211,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
del_timer_sync(&tbl->timer_context.timer);
tbl->timer_context.timer_is_set = false;
-
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_del(&tbl->list);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
-
kfree(tbl->rx_reorder_ptr);
kfree(tbl);
@@ -234,22 +224,17 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
/*
* This function returns the pointer to an entry in Rx reordering
* table which matches the given TA/TID pair.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
- if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+ if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
return tbl;
- }
- }
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return NULL;
}
@@ -266,14 +251,9 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
return;
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
- if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
+ if (!memcmp(tbl->ta, ta, ETH_ALEN))
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- }
- }
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
@@ -282,24 +262,18 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
/*
* This function finds the last sequence number used in the packets
* buffered in Rx reordering table.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static int
mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
- struct mwifiex_private *priv = ctx->priv;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
- if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
+ if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
return i;
- }
- }
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return -1;
}
@@ -317,17 +291,22 @@ mwifiex_flush_data(struct timer_list *t)
struct reorder_tmr_cnxt *ctx =
from_timer(ctx, t, timer);
int start_win, seq_num;
+ unsigned long flags;
ctx->timer_is_set = false;
+ spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
seq_num = mwifiex_11n_find_last_seq_num(ctx);
- if (seq_num < 0)
+ if (seq_num < 0) {
+ spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
return;
+ }
mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
start_win);
+ spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -354,11 +333,14 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
* If we get a TID, ta pair which is already present dispatch all the
* the packets and move the window size until the ssn
*/
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* if !tbl then create one */
new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
if (!new_node)
@@ -569,16 +551,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
int prev_start_win, start_win, end_win, win_size;
u16 pkt_index;
bool init_window_shift = false;
+ unsigned long flags;
int ret = 0;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
if (pkt_type != PKT_TYPE_BAR)
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
@@ -665,6 +651,8 @@ done:
if (!tbl->timer_context.timer_is_set ||
prev_start_win != tbl->start_win)
mwifiex_11n_rxreorder_timer_restart(tbl);
+
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
@@ -693,14 +681,18 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, EVENT,
"event: TID, TA not found in table\n");
return;
}
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
} else {
ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
@@ -734,6 +726,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
+ unsigned long flags;
block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
@@ -747,17 +740,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
add_ba_rsp->peer_mac_addr, tid);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl)
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return 0;
}
win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
>> BLOCKACKPARAM_WINSIZE_POS;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl) {
@@ -768,6 +764,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
else
tbl->amsdu = false;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_dbg(priv->adapter, CMD,
"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -807,11 +804,8 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
- &priv->rx_reorder_tbl_ptr, list) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ &priv->rx_reorder_tbl_ptr, list)
mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- }
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -935,6 +929,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
int tlv_buf_left = len;
int ret;
u8 *tmp;
+ unsigned long flags;
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
@@ -954,14 +949,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
tlv_bitmap_len);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
rx_reor_tbl_ptr =
mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
tlv_rxba->mac);
if (!rx_reor_tbl_ptr) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, ERROR,
"Can not find rx_reorder_tbl!");
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
for (i = 0; i < tlv_bitmap_len; i++) {
for (j = 0 ; j < 8; j++) {
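
The rxreorder hunks move rx_reorder_tbl_lock from the leaf helpers into their callers (hence the added "The caller must hold rx_reorder_tbl_lock spinlock" comments), so list walks such as mwifiex_11n_cleanup_reorder_tbl() no longer drop and re-take the lock around every deletion. A standalone userspace model of that caller-holds-the-lock pattern, using a plain mutex and list rather than the driver's structures:

/*
 * Standalone model of the "caller holds the lock" pattern adopted by the
 * rxreorder hunks: the whole traverse-and-delete runs under one lock and
 * the per-entry helper assumes the lock is already held.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int tid; struct entry *next; };

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *tbl_head;

/* caller must hold tbl_lock */
static void del_entry(struct entry **pprev, struct entry *e)
{
	*pprev = e->next;
	free(e);
}

static void cleanup_tbl(void)
{
	pthread_mutex_lock(&tbl_lock);
	for (struct entry **pp = &tbl_head; *pp; )
		del_entry(pp, *pp);	/* no unlock/relock per entry */
	pthread_mutex_unlock(&tbl_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			break;
		e->tid = i;
		e->next = tbl_head;
		tbl_head = e;
	}
	cleanup_tbl();
	printf("empty: %d\n", tbl_head == NULL);
	return 0;
}
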
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 4b5ae9098504..adc88433faa8 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1353,17 +1353,17 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
{
u32 rate;
- sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_RX_PACKETS) | BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_BITRATE) |
- BIT(NL80211_STA_INFO_SIGNAL) | BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) | BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_SIGNAL) | BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
if (!node)
return -ENOENT;
- sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) |
- BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->inactive_time =
jiffies_to_msecs(jiffies - node->stats.last_rx);
@@ -1413,7 +1413,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
sinfo->txrate.legacy = rate * 5;
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap &
WLAN_CAPABILITY_SHORT_PREAMBLE)
@@ -2322,7 +2322,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
if (priv->scan_block)
priv->scan_block = false;
- if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: Ignore connection.\t"
"Card removed or FW in bad state\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 9cfcdf6bec52..60db2b969e20 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -372,7 +372,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
adapter->ps_state = PS_STATE_SLEEP_CFM;
if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
- (adapter->is_hs_configured &&
+ (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags) &&
!adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
mwifiex_hs_activated_event(mwifiex_get_priv
@@ -564,25 +564,26 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
}
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: device in suspended state\n");
return -1;
}
- if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
+ if (test_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags) &&
+ cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: host entering sleep state\n");
return -1;
}
- if (adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: card is removed\n");
return -1;
}
- if (adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: FW is in bad state\n");
return -1;
@@ -789,7 +790,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
if (priv && (host_cmd->command !=
cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event(priv, false);
}
}
@@ -825,7 +827,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
return -1;
}
- adapter->is_cmd_timedout = 0;
+ clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
@@ -927,7 +929,7 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer);
struct cmd_ctrl_node *cmd_node;
- adapter->is_cmd_timedout = 1;
+ set_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
if (!adapter->curr_cmd) {
mwifiex_dbg(adapter, ERROR,
"cmd: empty curr_cmd\n");
@@ -953,7 +955,8 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
mwifiex_dbg(adapter, MSG,
"is_cmd_timedout = %d\n",
- adapter->is_cmd_timedout);
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+ &adapter->work_flags));
mwifiex_dbg(adapter, MSG,
"num_tx_timeout = %d\n",
adapter->dbg.num_tx_timeout);
@@ -1135,7 +1138,8 @@ void
mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
{
if (activated) {
- if (priv->adapter->is_hs_configured) {
+ if (test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &priv->adapter->work_flags)) {
priv->adapter->hs_activated = true;
mwifiex_update_rxreor_flags(priv->adapter,
RXREOR_FORCE_NO_DROP);
@@ -1186,11 +1190,11 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
phs_cfg->params.hs_config.gap);
}
if (conditions != HS_CFG_CANCEL) {
- adapter->is_hs_configured = true;
+ set_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->iface_type == MWIFIEX_USB)
mwifiex_hs_activated_event(priv, true);
} else {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->hs_activated)
mwifiex_hs_activated_event(priv, false);
}
@@ -1212,8 +1216,8 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
adapter->if_ops.wakeup(adapter);
adapter->hs_activated = false;
- adapter->is_hs_configured = false;
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY),
false);
@@ -1273,7 +1277,7 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
return;
}
adapter->pm_wakeup_card_req = true;
- if (adapter->is_hs_configured)
+ if (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags))
mwifiex_hs_activated_event(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
true);
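
The cmdevt.c changes, together with the cfg80211.c, debugfs.c, init.c and main.c hunks in this series, fold individual booleans such as is_hs_configured, is_cmd_timedout and surprise_removed into named bits of a single adapter->work_flags word accessed with set_bit()/clear_bit()/test_bit(). Below is a standalone model of that flag-word approach; the kernel helpers are additionally atomic, which this sketch does not reproduce.

/*
 * Standalone model of collapsing several status booleans into one flags
 * word with named bit positions, as the mwifiex work_flags conversion
 * does. The kernel's set_bit()/clear_bit()/test_bit() are atomic; these
 * plain helpers are not.
 */
#include <stdio.h>

enum {
	FLAG_SURPRISE_REMOVED,
	FLAG_IS_CMD_TIMEDOUT,
	FLAG_IS_HS_CONFIGURED,
	FLAG_IS_SUSPENDED,
};

static unsigned long work_flags;

static void flag_set(int nr)   { work_flags |=  (1UL << nr); }
static void flag_clear(int nr) { work_flags &= ~(1UL << nr); }
static int  flag_test(int nr)  { return !!(work_flags & (1UL << nr)); }

int main(void)
{
	flag_set(FLAG_IS_HS_CONFIGURED);
	flag_clear(FLAG_IS_CMD_TIMEDOUT);
	printf("hs_configured=%d cmd_timedout=%d\n",
	       flag_test(FLAG_IS_HS_CONFIGURED), flag_test(FLAG_IS_CMD_TIMEDOUT));
	return 0;
}
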
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 07453932f703..cce70252fd96 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -813,7 +813,7 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
MWIFIEX_SYNC_CMD, &hscfg);
mwifiex_enable_hs(priv->adapter);
- priv->adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &priv->adapter->work_flags);
ret = count;
done:
kfree(buf);
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index b10baacb51c9..75cbd609d606 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -355,8 +355,14 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
case WLAN_EID_HT_OPERATION:
case WLAN_EID_VHT_CAPABILITY:
case WLAN_EID_VHT_OPERATION:
- case WLAN_EID_VENDOR_SPECIFIC:
break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ /* Skip only Microsoft WMM IE */
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+ (const u8 *)hdr,
+ hdr->len + sizeof(struct ieee_types_header)))
+ break;
default:
memcpy(gen_ie->ie_buffer + ie_len, hdr,
hdr->len + sizeof(struct ieee_types_header));
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index d239e9248c05..673e89dff0b5 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -233,7 +233,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->event_received = false;
adapter->data_received = false;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
@@ -270,7 +270,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF);
adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF;
adapter->hs_cfg.gap = HS_CFG_GAP_DEF;
@@ -439,7 +439,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
priv = adapter->priv[i];
- spin_lock_init(&priv->rx_pkt_lock);
spin_lock_init(&priv->wmm.ra_list_spinlock);
spin_lock_init(&priv->curr_bcn_buf_lock);
spin_lock_init(&priv->sta_list_spinlock);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 510f6b8e717d..20cee5c397fb 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -404,7 +404,8 @@ process_start:
!skb_queue_empty(&adapter->tx_data_q)) {
mwifiex_process_tx_queue(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -420,7 +421,8 @@ process_start:
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_process_bypass_tx(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -435,7 +437,8 @@ process_start:
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -647,7 +650,7 @@ err_dnld_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
@@ -870,7 +873,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
"data: %lu BSS(%d-%d): Data <= kernel\n",
jiffies, priv->bss_type, priv->bss_num);
- if (priv->adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) {
kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
@@ -1279,7 +1282,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
@@ -1371,7 +1375,7 @@ static void mwifiex_rx_work_queue(struct work_struct *work)
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, rx_work);
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return;
mwifiex_process_rx(adapter);
}
@@ -1387,7 +1391,7 @@ static void mwifiex_main_work_queue(struct work_struct *work)
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, main_work);
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return;
mwifiex_main_process(adapter);
}
@@ -1404,7 +1408,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
if (adapter->if_ops.disable_int)
adapter->if_ops.disable_int(adapter);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
adapter->int_status = 0;
@@ -1492,11 +1496,11 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
adapter->if_ops.up_dev(adapter);
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
init_waitqueue_head(&adapter->init_wait_q);
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
- adapter->is_cmd_timedout = 0;
+ clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
init_waitqueue_head(&adapter->hs_activate_wait_q);
init_waitqueue_head(&adapter->cmd_wait_q.wait);
adapter->cmd_wait_q.status = 0;
@@ -1551,7 +1555,7 @@ err_init_fw:
adapter->if_ops.unregister_dev(adapter);
err_kmalloc:
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
mwifiex_dbg(adapter, ERROR,
@@ -1648,9 +1652,9 @@ mwifiex_add_card(void *card, struct completion *fw_done,
adapter->fw_done = fw_done;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
init_waitqueue_head(&adapter->init_wait_q);
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
init_waitqueue_head(&adapter->hs_activate_wait_q);
init_waitqueue_head(&adapter->cmd_wait_q.wait);
@@ -1698,7 +1702,7 @@ err_init_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
err_registerdev:
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 69ac0a22c28c..b025ba164412 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -517,6 +517,14 @@ enum mwifiex_iface_work_flags {
MWIFIEX_IFACE_WORK_CARD_RESET,
};
+enum mwifiex_adapter_work_flags {
+ MWIFIEX_SURPRISE_REMOVED,
+ MWIFIEX_IS_CMD_TIMEDOUT,
+ MWIFIEX_IS_SUSPENDED,
+ MWIFIEX_IS_HS_CONFIGURED,
+ MWIFIEX_IS_HS_ENABLING,
+};
+
struct mwifiex_band_config {
u8 chan_band:2;
u8 chan_width:2;
@@ -616,9 +624,6 @@ struct mwifiex_private {
struct list_head rx_reorder_tbl_ptr;
/* spin lock for rx_reorder_tbl_ptr queue */
spinlock_t rx_reorder_tbl_lock;
- /* spin lock for Rx packets */
- spinlock_t rx_pkt_lock;
-
#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
u32 assoc_rsp_size;
@@ -875,7 +880,7 @@ struct mwifiex_adapter {
struct device *dev;
struct wiphy *wiphy;
u8 perm_addr[ETH_ALEN];
- bool surprise_removed;
+ unsigned long work_flags;
u32 fw_release_number;
u8 intf_hdr_len;
u16 init_wait_q_woken;
@@ -929,7 +934,6 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *curr_cmd;
/* spin lock for command */
spinlock_t mwifiex_cmd_lock;
- u8 is_cmd_timedout;
u16 last_init_cmd;
struct timer_list cmd_timer;
struct list_head cmd_free_q;
@@ -979,13 +983,10 @@ struct mwifiex_adapter {
u16 pps_uapsd_mode;
u32 pm_wakeup_fw_try;
struct timer_list wakeup_timer;
- u8 is_hs_configured;
struct mwifiex_hs_config_param hs_cfg;
u8 hs_activated;
u16 hs_activate_wait_q_woken;
wait_queue_head_t hs_activate_wait_q;
- bool is_suspended;
- bool hs_enabling;
u8 event_body[MAX_EVENT_SIZE];
u32 hw_dot_11n_dev_cap;
u8 hw_dev_mcs_support;
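The main.h change above is the core of the mwifiex conversion: the per-adapter booleans (surprise_removed, is_cmd_timedout, is_suspended, is_hs_configured, hs_enabling) become bit numbers operated on a single unsigned long with the kernel's atomic bitops. A minimal sketch of the pattern, with hypothetical names outside the driver, might look like:

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical illustration of the work_flags pattern; not driver code. */
enum example_work_flags {
	EXAMPLE_SURPRISE_REMOVED,
	EXAMPLE_IS_SUSPENDED,
};

struct example_adapter {
	unsigned long work_flags;	/* replaces the individual bool fields */
};

static void example_mark_suspended(struct example_adapter *ad)
{
	/* set_bit() is atomic, so concurrent writers need no extra lock */
	set_bit(EXAMPLE_IS_SUSPENDED, &ad->work_flags);
}

static bool example_can_transmit(const struct example_adapter *ad)
{
	return !test_bit(EXAMPLE_SURPRISE_REMOVED, &ad->work_flags) &&
	       !test_bit(EXAMPLE_IS_SUSPENDED, &ad->work_flags);
}

Usage then mirrors the hunks in main.c, pcie.c, sdio.c and usb.c: clear_bit() on init/resume, set_bit() on suspend or surprise removal, and test_bit() at every consumer.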
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 0c42b7296ddd..3fe81b2a929a 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -170,7 +170,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@@ -178,8 +178,8 @@ static int mwifiex_pcie_suspend(struct device *dev)
flush_workqueue(adapter->workqueue);
/* Indicate device suspended */
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return 0;
}
@@ -207,13 +207,13 @@ static int mwifiex_pcie_resume(struct device *dev)
adapter = card->adapter;
- if (!adapter->is_suspended) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"Device already resumed\n");
return 0;
}
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
@@ -2430,7 +2430,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
}
adapter = card->adapter;
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
goto exit;
if (card->msix_enable)
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 895b806cdb03..8e483b0bc3b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1495,7 +1495,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
return -EBUSY;
}
- if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"Ignore scan. Card removed or firmware in bad state\n");
return -EFAULT;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index dfdcbc4f141a..d49fbd58afa7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,13 +181,13 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter = card->adapter;
- if (!adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"device already resumed\n");
return 0;
}
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
/* Disable Host Sleep */
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -260,7 +260,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: not allowed while suspended\n", __func__);
return -1;
@@ -450,7 +450,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@@ -460,8 +460,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
/* Indicate device suspended */
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 03a6492662ca..a327fc5b36e3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -224,7 +224,8 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
adapter->tx_lock_flag = false;
adapter->pps_uapsd_mode = false;
- if (adapter->is_cmd_timedout && adapter->curr_cmd)
+ if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags) &&
+ adapter->curr_cmd)
return;
priv->media_connected = false;
mwifiex_dbg(adapter, MSG,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 5414b755cf82..b454b5f85503 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -419,7 +419,8 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
}
if (hs_cfg->is_invoke_hostcmd) {
if (hs_cfg->conditions == HS_CFG_CANCEL) {
- if (!adapter->is_hs_configured)
+ if (!test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags))
/* Already cancelled */
break;
/* Save previous condition */
@@ -535,7 +536,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
memset(&hscfg, 0, sizeof(hscfg));
hscfg.is_invoke_hostcmd = true;
- adapter->hs_enabling = true;
+ set_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_cancel_all_pending_cmd(adapter);
if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
@@ -601,7 +602,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
else
info->wep_status = false;
- info->is_hs_configured = adapter->is_hs_configured;
+ info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
info->is_deep_sleep = adapter->is_deep_sleep;
return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 620f8650a742..37c24b95e642 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -143,7 +143,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
int ret;
struct mwifiex_txinfo *tx_info = NULL;
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return -1;
if (!priv->media_connected)
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 1e6a62c69ac5..a83c5afc256a 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -289,32 +289,6 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
src_node->stats.rx_packets++;
}
- skb->dev = priv->netdev;
- skb->protocol = eth_type_trans(skb, priv->netdev);
- skb->ip_summed = CHECKSUM_NONE;
-
- /* This is required only in case of 11n and USB/PCIE as we alloc
- * a buffer of 4K only if its 11N (to be able to receive 4K
- * AMSDU packets). In case of SD we allocate buffers based
- * on the size of packet and hence this is not needed.
- *
- * Modifying the truesize here as our allocation for each
- * skb is 4K but we only receive 2K packets and this cause
- * the kernel to start dropping packets in case where
- * application has allocated buffer based on 2K size i.e.
- * if there a 64K packet received (in IP fragments and
- * application allocates 64K to receive this packet but
- * this packet would almost double up because we allocate
- * each 1.5K fragment in 4K and pass it up. As soon as the
- * 64K limit hits kernel will start to drop rest of the
- * fragments. Currently we fail the Filesndl-ht.scr script
- * for UDP, hence this fix
- */
- if ((adapter->iface_type == MWIFIEX_USB ||
- adapter->iface_type == MWIFIEX_PCIE) &&
- (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
- skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
-
if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
@@ -350,6 +324,32 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
return 0;
}
+ skb->dev = priv->netdev;
+ skb->protocol = eth_type_trans(skb, priv->netdev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* This is required only in the case of 11n and USB/PCIE, as we
+ * allocate a 4K buffer only for 11N (to be able to receive 4K
+ * AMSDU packets). For SD we allocate buffers based on the packet
+ * size, so this is not needed there.
+ *
+ * The truesize is adjusted here because our allocation for each
+ * skb is 4K while we only receive 2K packets, and this causes the
+ * kernel to start dropping packets when the application has sized
+ * its receive buffer for 2K packets. For example, if a 64K packet
+ * is received in IP fragments and the application allocates 64K to
+ * receive it, the accounted size almost doubles because each 1.5K
+ * fragment is allocated in 4K and passed up. As soon as the 64K
+ * limit is hit, the kernel starts to drop the rest of the
+ * fragments. Currently the Filesndl-ht.scr script fails for UDP,
+ * hence this fix
+ */
+ if ((adapter->iface_type == MWIFIEX_USB ||
+ adapter->iface_type == MWIFIEX_PCIE) &&
+ skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
+ skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
+
/* Forward multicast/broadcast packet to upper layer*/
if (in_interrupt())
netif_rx(skb);
@@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
if (!priv->ap_11n_enabled ||
(!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
(le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
ret = mwifiex_handle_uap_rx_forward(priv, skb);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* Reorder and send to kernel */
pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 88f4c89f89ba..433c6a16870b 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -181,7 +181,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
atomic_dec(&card->rx_data_urb_pending);
if (recv_length) {
- if (urb->status || (adapter->surprise_removed)) {
+ if (urb->status ||
+ test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"URB status is failed: %d\n", urb->status);
/* Do not free skb in case of command ep */
@@ -218,10 +219,10 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
dev_kfree_skb_any(skb);
}
} else if (urb->status) {
- if (!adapter->is_suspended) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, FATAL,
"Card is removed: %d\n", urb->status);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
}
dev_kfree_skb_any(skb);
return;
@@ -529,7 +530,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
}
- if (unlikely(adapter->is_suspended))
+ if (unlikely(test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)))
mwifiex_dbg(adapter, WARN,
"Device already suspended\n");
@@ -537,19 +538,19 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return -EFAULT;
}
- /* 'is_suspended' flag indicates device is suspended.
+ /* 'MWIFIEX_IS_SUSPENDED' bit indicates device is suspended.
* It must be set here before the usb_kill_urb() calls; in the
* completion handlers, urb->status (= -ENOENT) and this flag are
* used in combination to distinguish between a 'suspended' state
* and a 'disconnect' one.
*/
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
usb_kill_urb(card->rx_cmd.urb);
@@ -593,7 +594,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
}
adapter = card->adapter;
- if (unlikely(!adapter->is_suspended)) {
+ if (unlikely(!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags))) {
mwifiex_dbg(adapter, WARN,
"Device already resumed\n");
return 0;
@@ -602,7 +603,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
/* Indicate device resumed. The netdev queue will be resumed only
* after the urbs have been re-submitted
*/
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
if (!atomic_read(&card->rx_data_urb_pending))
for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
@@ -1158,13 +1159,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
unsigned long flags;
int idx, ret;
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: not allowed while suspended\n", __func__);
return -1;
}
- if (adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
return -1;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 6dd212898117..f9b71539d33e 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -197,9 +197,11 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->is_deep_sleep = adapter->is_deep_sleep;
info->pm_wakeup_card_req = adapter->pm_wakeup_card_req;
info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
- info->is_hs_configured = adapter->is_hs_configured;
+ info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
info->hs_activated = adapter->hs_activated;
- info->is_cmd_timedout = adapter->is_cmd_timedout;
+ info->is_cmd_timedout = test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+ &adapter->work_flags);
info->num_cmd_host_to_card_failure
= adapter->dbg.num_cmd_host_to_card_failure;
info->num_cmd_sleep_cfm_host_to_card_failure
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 936a0a841af8..407b9932ca4d 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -599,7 +599,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
if (priv->adapter->if_ops.clean_pcie_ring &&
- !priv->adapter->surprise_removed)
+ !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index fc05d79c80d0..b6c5f17dca30 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,10 +1,37 @@
config MT76_CORE
tristate
+config MT76_USB
+ tristate
+ depends on MT76_CORE
+
+config MT76x2_COMMON
+ tristate
+ depends on MT76_CORE
+
+config MT76x0U
+ tristate "MediaTek MT76x0U (USB) support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7610U-based wireless USB dongles.
+
config MT76x2E
tristate "MediaTek MT76x2E (PCIe) support"
select MT76_CORE
+ select MT76x2_COMMON
depends on MAC80211
depends on PCI
---help---
This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
+
+config MT76x2U
+ tristate "MediaTek MT76x2U (USB) support"
+ select MT76_CORE
+ select MT76_USB
+ select MT76x2_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7612U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index a0156bc01dea..158d10d2716c 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -1,15 +1,31 @@
obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76_USB) += mt76-usb.o
+obj-$(CONFIG_MT76x0U) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
+mt76-usb-y := usb.o usb_trace.o usb_mcu.o
+
CFLAGS_trace.o := -I$(src)
+CFLAGS_usb_trace.o := -I$(src)
+
+mt76x2-common-y := \
+ mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
+ mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
+ mt76x2_debugfs.o
mt76x2e-y := \
mt76x2_pci.o mt76x2_dma.o \
- mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
- mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+ mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
+ mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
mt76x2_dfs.o mt76x2_trace.o
+mt76x2u-y := \
+ mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
+ mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
+
CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 1e8cdce919d9..73c8b2805c97 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -113,7 +113,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
if (nframes)
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
REORDER_TIMEOUT);
- mt76_rx_complete(dev, &frames, -1);
+ mt76_rx_complete(dev, &frames, NULL);
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 3dbedcedc2c4..c51da2205b93 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -239,6 +239,80 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
iowrite32(q->head, &q->regs->cpu_idx);
}
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_queue_entry e;
+ struct mt76_txwi_cache *t;
+ struct mt76_queue_buf buf[32];
+ struct sk_buff *iter;
+ dma_addr_t addr;
+ int len;
+ u32 tx_info = 0;
+ int n, ret;
+
+ t = mt76_get_txwi(dev);
+ if (!t) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return -ENOMEM;
+ }
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+ &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto free;
+
+ len = skb->len - skb->data_len;
+ addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr)) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ n = 0;
+ buf[n].addr = t->dma_addr;
+ buf[n++].len = dev->drv->txwi_size;
+ buf[n].addr = addr;
+ buf[n++].len = len;
+
+ skb_walk_frags(skb, iter) {
+ if (n == ARRAY_SIZE(buf))
+ goto unmap;
+
+ addr = dma_map_single(dev->dev, iter->data, iter->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr))
+ goto unmap;
+
+ buf[n].addr = addr;
+ buf[n++].len = iter->len;
+ }
+
+ if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ goto unmap;
+
+ return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+
+unmap:
+ ret = -ENOMEM;
+ for (n--; n > 0; n--)
+ dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
+ DMA_TO_DEVICE);
+
+free:
+ e.skb = skb;
+ e.txwi = t;
+ dev->drv->tx_complete_skb(dev, q, &e, true);
+ mt76_put_txwi(dev, t);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
+
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
@@ -400,7 +474,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
do {
cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
- mt76_rx_poll_complete(dev, qid);
+ mt76_rx_poll_complete(dev, qid, napi);
done += cur;
} while (cur && done < budget);
@@ -436,6 +510,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
.init = mt76_dma_init,
.alloc = mt76_dma_alloc_queue,
.add_buf = mt76_dma_add_buf,
+ .tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1dad39697929..27248e24a19b 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -25,6 +25,39 @@
#define MT_DMA_CTL_LAST_SEC0 BIT(30)
#define MT_DMA_CTL_DMA_DONE BIT(31)
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_INFO_TX_BURST BIT(17)
+#define MT_TXD_INFO_80211 BIT(19)
+#define MT_TXD_INFO_TSO BIT(20)
+#define MT_TXD_INFO_CSO BIT(21)
+#define MT_TXD_INFO_WIV BIT(24)
+#define MT_TXD_INFO_QSEL GENMASK(26, 25)
+#define MT_TXD_INFO_DPORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
+#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
+#define MT_MCU_MSG_PORT GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD BIT(30)
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_RX_RXWI_LEN 32
+
struct mt76_desc {
__le32 buf0;
__le32 ctrl;
@@ -32,6 +65,16 @@ struct mt76_desc {
__le32 info;
} __packed __aligned(4);
+enum dma_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
int mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
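The new MT_TXD_INFO_* / MT_RX_FCE_INFO_* / MT_MCU_MSG_* definitions above are plain GENMASK()/BIT() field masks, intended to be used with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> (FIELD_GET is used the same way in the mt76x0 RX path later in this series). A hedged sketch with made-up values, assuming this dma.h is included:

#include <linux/bitfield.h>
#include <linux/types.h>

/* Hypothetical helpers showing how the masks are meant to be used. */
static u32 example_build_txd_info(u16 xfer_len)
{
	return FIELD_PREP(MT_TXD_INFO_LEN, xfer_len) |
	       FIELD_PREP(MT_TXD_INFO_QSEL, 2) |	/* example QSEL value */
	       MT_TXD_INFO_80211;
}

static u16 example_rx_fce_len(u32 fce_info)
{
	/* Extract the 14-bit length field from a received FCE info word. */
	return FIELD_GET(MT_RX_FCE_INFO_LEN, fce_info);
}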
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index d62e34e7eadf..029d54bce9e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -303,14 +303,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
SET_IEEE80211_DEV(hw, dev->dev);
SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->available_antennas_tx = dev->antenna_mask;
@@ -591,15 +583,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
- int queue)
+ struct napi_struct *napi)
{
- struct napi_struct *napi = NULL;
struct ieee80211_sta *sta;
struct sk_buff *skb;
- if (queue >= 0)
- napi = &dev->napi[queue];
-
spin_lock(&dev->rx_lock);
while ((skb = __skb_dequeue(frames)) != NULL) {
if (mt76_check_ccmp_pn(skb)) {
@@ -613,7 +601,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
spin_unlock(&dev->rx_lock);
}
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi)
{
struct sk_buff_head frames;
struct sk_buff *skb;
@@ -625,5 +614,6 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
mt76_rx_aggr_reorder(skb, &frames);
}
- mt76_rx_complete(dev, &frames, q);
+ mt76_rx_complete(dev, &frames, napi);
}
+EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index d2166fbf50ff..2eab35879163 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
+#include <linux/usb.h>
#include <net/mac80211.h>
#include "util.h"
@@ -30,6 +31,7 @@
#define MT_RX_BUF_SIZE 2048
struct mt76_dev;
+struct mt76_wcid;
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
@@ -62,12 +64,22 @@ struct mt76_queue_buf {
int len;
};
+struct mt76u_buf {
+ struct mt76_dev *dev;
+ struct urb *urb;
+ size_t len;
+ bool done;
+};
+
struct mt76_queue_entry {
union {
void *buf;
struct sk_buff *skb;
};
- struct mt76_txwi_cache *txwi;
+ union {
+ struct mt76_txwi_cache *txwi;
+ struct mt76u_buf ubuf;
+ };
bool schedule;
};
@@ -88,6 +100,7 @@ struct mt76_queue {
struct list_head swq;
int swq_queued;
+ u16 first;
u16 head;
u16 tail;
int ndesc;
@@ -110,6 +123,10 @@ struct mt76_queue_ops {
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi);
+ int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+
void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
int *len, u32 *info, bool *more);
@@ -187,9 +204,13 @@ struct mt76_rx_tid {
enum {
MT76_STATE_INITIALIZED,
MT76_STATE_RUNNING,
+ MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
MT76_RESET,
MT76_OFFCHANNEL,
+ MT76_REMOVED,
+ MT76_READING_STATS,
+ MT76_MORE_STATS,
};
struct mt76_hw_cap {
@@ -210,6 +231,8 @@ struct mt76_driver_ops {
void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
+ bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
+
void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -229,6 +252,64 @@ struct mt76_sband {
struct mt76_channel_state *chan;
};
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+enum mt_vendor_req {
+ MT_VEND_DEV_MODE = 0x1,
+ MT_VEND_WRITE = 0x2,
+ MT_VEND_MULTI_WRITE = 0x6,
+ MT_VEND_MULTI_READ = 0x7,
+ MT_VEND_READ_EEPROM = 0x9,
+ MT_VEND_WRITE_FCE = 0x42,
+ MT_VEND_WRITE_CFG = 0x46,
+ MT_VEND_READ_CFG = 0x47,
+};
+
+enum mt76u_in_ep {
+ MT_EP_IN_PKT_RX,
+ MT_EP_IN_CMD_RESP,
+ __MT_EP_IN_MAX,
+};
+
+enum mt76u_out_ep {
+ MT_EP_OUT_INBAND_CMD,
+ MT_EP_OUT_AC_BK,
+ MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_VI,
+ MT_EP_OUT_AC_VO,
+ MT_EP_OUT_HCCA,
+ __MT_EP_OUT_MAX,
+};
+
+#define MT_SG_MAX_SIZE 8
+#define MT_NUM_TX_ENTRIES 256
+#define MT_NUM_RX_ENTRIES 128
+#define MCU_RESP_URB_SIZE 1024
+struct mt76_usb {
+ struct mutex usb_ctrl_mtx;
+ u8 data[32];
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+ struct delayed_work stat_work;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u16 out_max_packet;
+ u8 in_ep[__MT_EP_IN_MAX];
+ u16 in_max_packet;
+
+ struct mt76u_mcu {
+ struct mutex mutex;
+ struct completion cmpl;
+ struct mt76u_buf res;
+ u32 msg_seq;
+ } mcu;
+};
+
struct mt76_dev {
struct ieee80211_hw *hw;
struct cfg80211_chan_def chandef;
@@ -271,6 +352,8 @@ struct mt76_dev {
char led_name[32];
bool led_al;
u8 led_pin;
+
+ struct mt76_usb usb;
};
enum mt76_phy_type {
@@ -390,6 +473,26 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
+/* increment with wrap-around */
+static inline int mt76_incr(int val, int size)
+{
+ return (val + 1) & (size - 1);
+}
+
+/* decrement with wrap-around */
+static inline int mt76_decr(int val, int size)
+{
+ return (val - 1) & (size - 1);
+}
+
+/* Hardware uses mirrored order of queues with Q3
+ * having the highest priority
+ */
+static inline u8 q2hwq(u8 q)
+{
+ return q ^ 0x3;
+}
+
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
@@ -409,9 +512,9 @@ wcid_to_sta(struct mt76_wcid *wcid)
return container_of(ptr, struct ieee80211_sta, drv_priv);
}
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -442,10 +545,69 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
+struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
- int queue);
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+ struct napi_struct *napi);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+/* usb */
+static inline bool mt76u_urb_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN &&
+ urb->status != -ENOENT;
+}
+
+/* Map hardware queues to usb endpoints */
+static inline u8 q2ep(u8 qid)
+{
+ /* TODO: take management packets to queue 5 */
+ return qid + 1;
+}
+
+static inline bool mt76u_check_sg(struct mt76_dev *dev)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ return (udev->bus->sg_tablesize > 0 &&
+ (udev->bus->no_sg_constraint ||
+ udev->speed == USB_SPEED_WIRELESS));
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len);
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
+void mt76u_deinit(struct mt76_dev *dev);
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen, gfp_t gfp);
+void mt76u_buf_free(struct mt76u_buf *buf);
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+int mt76u_submit_rx_buffers(struct mt76_dev *dev);
+int mt76u_alloc_queues(struct mt76_dev *dev);
+void mt76u_stop_queues(struct mt76_dev *dev);
+void mt76u_stop_stat_wk(struct mt76_dev *dev);
+void mt76u_queues_deinit(struct mt76_dev *dev);
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset);
+void mt76u_mcu_complete_urb(struct urb *urb);
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
+void mt76u_mcu_fw_reset(struct mt76_dev *dev);
+int mt76u_mcu_init_rx(struct mt76_dev *dev);
+
#endif
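The mt76_incr()/mt76_decr() helpers added to mt76.h above assume power-of-two ring sizes, so the bitwise AND with (size - 1) acts as a cheap modulo and gives wrap-around in both directions. A small, hypothetical illustration, assuming mt76.h is included:

/* Hypothetical use of the wrap-around helpers; 'size' must be a power of two. */
static void example_ring_indices(void)
{
	int size = 128;
	int idx = size - 1;		/* last slot */

	idx = mt76_incr(idx, size);	/* wraps forward: 127 -> 0 */
	idx = mt76_decr(idx, size);	/* wraps back:    0 -> 127 */
	(void)idx;
}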
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
new file mode 100644
index 000000000000..7843908261ba
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MT76x0U) += mt76x0.o
+
+mt76x0-objs = \
+ usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \
+ mac.o util.o debugfs.o tx.o core.o
+# ccflags-y := -DDEBUG
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
new file mode 100644
index 000000000000..892803fce842
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev)
+{
+ int i = 100;
+ u32 val;
+
+ do {
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -EIO;
+
+ val = mt76_rr(dev, MT_MAC_CSR0);
+ if (val && ~val)
+ return 0;
+
+ udelay(10);
+ } while (i--);
+
+ return -EIO;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
new file mode 100644
index 000000000000..e7a77a886068
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt76x0_dev *dev = data;
+
+ mt76_wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt76x0_dev *dev = data;
+
+ *val = mt76_rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt76x0_dev *dev = file->private;
+ int i, j;
+
+#define stat_printf(grp, off, name) \
+ seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+ stat_printf(rx_stat, 0, rx_crc_err);
+ stat_printf(rx_stat, 1, rx_phy_err);
+ stat_printf(rx_stat, 2, rx_false_cca);
+ stat_printf(rx_stat, 3, rx_plcp_err);
+ stat_printf(rx_stat, 4, rx_fifo_overflow);
+ stat_printf(rx_stat, 5, rx_duplicate);
+
+ stat_printf(tx_stat, 0, tx_fail_cnt);
+ stat_printf(tx_stat, 1, tx_bcn_cnt);
+ stat_printf(tx_stat, 2, tx_success);
+ stat_printf(tx_stat, 3, tx_retransmit);
+ stat_printf(tx_stat, 4, tx_zero_len);
+ stat_printf(tx_stat, 5, tx_underflow);
+
+ stat_printf(aggr_stat, 0, non_aggr_tx);
+ stat_printf(aggr_stat, 1, aggr_tx);
+
+ stat_printf(zero_len_del, 0, tx_zero_len_del);
+ stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+ seq_puts(file, "Aggregations stats:\n");
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%08llx ",
+ dev->stats.aggr_n[i * 8 + j]);
+ seq_putc(file, '\n');
+ }
+
+ seq_printf(file, "recent average AMPDU len: %d\n",
+ atomic_read(&dev->avg_ampdu_len));
+
+ return 0;
+}
+
+static int
+mt76x0_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x0_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt76x0_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
+mt76x0_eeprom_param_read(struct seq_file *file, void *data)
+{
+ struct mt76x0_dev *dev = file->private;
+ int i;
+
+ seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+ seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n",
+ dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]);
+ seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n",
+ dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1],
+ dev->ee->rssi_offset_5ghz[2]);
+ seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off);
+ seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz);
+ seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n",
+ dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1],
+ dev->ee->lna_gain_5ghz[2]);
+ seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type);
+ seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+ dev->ee->reg.start + dev->ee->reg.num - 1);
+
+ seq_puts(file, "Per channel power:\n");
+ for (i = 0; i < 58; i++)
+ seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i,
+ dev->ee->tx_pwr_per_chan[i]);
+
+ seq_puts(file, "Per rate power 2GHz:\n");
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+ i, dev->ee->tx_pwr_cfg_2g[i][0],
+ dev->ee->tx_pwr_cfg_2g[i][1]);
+
+ seq_puts(file, "Per rate power 5GHz:\n");
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+ i, dev->ee->tx_pwr_cfg_5g[i][0],
+ dev->ee->tx_pwr_cfg_5g[i][1]);
+
+ return 0;
+}
+
+static int
+mt76x0_eeprom_param_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x0_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+ .open = mt76x0_eeprom_param_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir);
+ if (!dir)
+ return;
+
+ debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+ debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+ &fops_regval);
+ debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
+ &fops_eeprom_param);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
new file mode 100644
index 000000000000..e2efb430419b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+ struct mt76x0_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+ const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+ unsigned int hdrlen;
+
+ if (unlikely(len < 10))
+ return 0;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (unlikely(hdrlen > len))
+ return 0;
+ return hdrlen;
+}
+
+static struct sk_buff *
+mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+ void *data, u32 seg_len, u32 truesize, struct page *p)
+{
+ struct sk_buff *skb;
+ u32 true_len, hdr_len = 0, copy, frag;
+
+ skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi);
+ if (!true_len || true_len > seg_len)
+ goto bad_frame;
+
+ hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
+ if (!hdr_len)
+ goto bad_frame;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+ memcpy(skb_put(skb, hdr_len), data, hdr_len);
+
+ data += hdr_len + 2;
+ true_len -= hdr_len;
+ hdr_len = 0;
+ }
+
+ /* If not doing paged RX, the allocated skb will always have enough space */
+ copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+ frag = true_len - copy;
+
+ memcpy(skb_put(skb, copy), data, copy);
+ data += copy;
+
+ if (frag) {
+ skb_add_rx_frag(skb, 0, p, data - page_address(p),
+ frag, truesize);
+ get_page(p);
+ }
+
+ return skb;
+
+bad_frame:
+ dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n",
+ true_len, hdr_len);
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data,
+ u32 seg_len, struct page *p)
+{
+ struct sk_buff *skb;
+ struct mt76x0_rxwi *rxwi;
+ u32 fce_info, truesize = seg_len;
+
+ /* The DMA_INFO field at the beginning of the segment contains only some
+ * of the information; we need to read the FCE descriptor from the end.
+ */
+ fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+ seg_len -= MT_FCE_INFO_LEN;
+
+ data += MT_DMA_HDR_LEN;
+ seg_len -= MT_DMA_HDR_LEN;
+
+ rxwi = (struct mt76x0_rxwi *) data;
+ data += sizeof(struct mt76x0_rxwi);
+ seg_len -= sizeof(struct mt76x0_rxwi);
+
+ if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
+ dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n");
+
+ trace_mt76x0_rx(&dev->mt76, rxwi, fce_info);
+
+ skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
+ if (!skb)
+ return;
+
+ spin_lock(&dev->mac_lock);
+ ieee80211_rx(dev->mt76.hw, skb);
+ spin_unlock(&dev->mac_lock);
+}
+
+static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len)
+{
+ u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+ sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN;
+ u16 dma_len = get_unaligned_le16(data);
+
+ if (data_len < min_seg_len ||
+ WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return 0;
+
+ return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e)
+{
+ u32 seg_len, data_len = e->urb->actual_length;
+ u8 *data = page_address(e->p);
+ struct page *new_p = NULL;
+ int cnt = 0;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return;
+
+ /* Copy if there is very little data in the buffer. */
+ if (data_len > 512)
+ new_p = dev_alloc_pages(MT_RX_ORDER);
+
+ while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) {
+ mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+
+ data_len -= seg_len;
+ data += seg_len;
+ cnt++;
+ }
+
+ if (cnt > 1)
+ trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p);
+
+ if (new_p) {
+ /* we have one extra ref from the allocator */
+ __free_pages(e->p, MT_RX_ORDER);
+
+ e->p = new_p;
+ }
+}
+
+static struct mt76x0_dma_buf_rx *
+mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev)
+{
+ struct mt76x0_rx_queue *q = &dev->rx_q;
+ struct mt76x0_dma_buf_rx *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (!q->pending)
+ goto out;
+
+ buf = &q->e[q->start];
+ q->pending--;
+ q->start = (q->start + 1) % q->entries;
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+ return buf;
+}
+
+static void mt76x0_complete_rx(struct urb *urb)
+{
+ struct mt76x0_dev *dev = urb->context;
+ struct mt76x0_rx_queue *q = &dev->rx_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (mt76x0_urb_has_error(urb))
+ dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+ goto out;
+
+ q->end = (q->end + 1) % q->entries;
+ q->pending++;
+ tasklet_schedule(&dev->rx_tasklet);
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt76x0_rx_tasklet(unsigned long data)
+{
+ struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+ struct mt76x0_dma_buf_rx *e;
+
+ while ((e = mt76x0_rx_get_pending_entry(dev))) {
+ if (e->urb->status)
+ continue;
+
+ mt76x0_rx_process_entry(dev, e);
+ mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC);
+ }
+}
+
+static void mt76x0_complete_tx(struct urb *urb)
+{
+ struct mt76x0_tx_queue *q = urb->context;
+ struct mt76x0_dev *dev = q->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (mt76x0_urb_has_error(urb))
+ dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+ goto out;
+
+ skb = q->e[q->start].skb;
+ trace_mt76x0_tx_dma_done(&dev->mt76, skb);
+
+ __skb_queue_tail(&dev->tx_skb_done, skb);
+ tasklet_schedule(&dev->tx_tasklet);
+
+ if (q->used == q->entries - q->entries / 8)
+ ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+
+ q->start = (q->start + 1) % q->entries;
+ q->used--;
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static void mt76x0_tx_tasklet(unsigned long data)
+{
+ struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+ struct sk_buff_head skbs;
+ unsigned long flags;
+
+ __skb_queue_head_init(&skbs);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ set_bit(MT76_MORE_STATS, &dev->mt76.state);
+ if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+
+ skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+
+ mt76x0_tx_status(dev, skb);
+ }
+}
+
+static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev,
+ struct sk_buff *skb, u8 ep)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]);
+ struct mt76x0_dma_buf_tx *e;
+ struct mt76x0_tx_queue *q = &dev->tx_q[ep];
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (WARN_ON_ONCE(q->entries <= q->used)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ e = &q->e[q->end];
+ e->skb = skb;
+ usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+ mt76x0_complete_tx, q);
+ ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+ if (ret) {
+ /* Special-handle ENODEV from TX urb submission because it will
+ * often be the first ENODEV we see after device is removed.
+ */
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ else
+ dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n",
+ ret);
+ goto out;
+ }
+
+ q->end = (q->end + 1) % q->entries;
+ q->used++;
+
+ if (q->used >= q->entries)
+ ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return ret;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+ if (ep == 5)
+ return MT_QSEL_MGMT;
+ return MT_QSEL_EDCA;
+}
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q)
+{
+ u8 ep = q2ep(hw_q);
+ u32 dma_flags;
+ int ret;
+
+ dma_flags = MT_TXD_PKT_INFO_80211;
+ if (wcid->hw_key_idx == 0xff)
+ dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+ ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+ if (ret)
+ return ret;
+
+ ret = mt76x0_dma_submit_tx(dev, skb, ep);
+
+ if (ret) {
+ ieee80211_free_txskb(dev->mt76.hw, skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt76x0_kill_rx(struct mt76x0_dev *dev)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ int next = dev->rx_q.end;
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+ usb_poison_urb(dev->rx_q.e[next].urb);
+ spin_lock_irqsave(&dev->rx_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+ struct mt76x0_dma_buf_rx *e, gfp_t gfp)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ u8 *buf = page_address(e->p);
+ unsigned pipe;
+ int ret;
+
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]);
+
+ usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+ mt76x0_complete_rx, dev);
+
+ trace_mt76x0_submit_urb(&dev->mt76, e->urb);
+ ret = usb_submit_urb(e->urb, gfp);
+ if (ret)
+ dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret);
+
+ return ret;
+}
+
+static int mt76x0_submit_rx(struct mt76x0_dev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt76x0_free_rx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+ usb_free_urb(dev->rx_q.e[i].urb);
+ }
+}
+
+static int mt76x0_alloc_rx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+ dev->rx_q.dev = dev;
+ dev->rx_q.entries = N_RX_ENTRIES;
+
+ for (i = 0; i < N_RX_ENTRIES; i++) {
+ dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+ if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q)
+{
+ int i;
+
+ WARN_ON(q->used);
+
+ for (i = 0; i < q->entries; i++) {
+ usb_poison_urb(q->e[i].urb);
+ usb_free_urb(q->e[i].urb);
+ }
+}
+
+static void mt76x0_free_tx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ mt76x0_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev,
+ struct mt76x0_tx_queue *q)
+{
+ int i;
+
+ q->dev = dev;
+ q->entries = N_TX_ENTRIES;
+
+ for (i = 0; i < N_TX_ENTRIES; i++) {
+ q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!q->e[i].urb)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mt76x0_alloc_tx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX,
+ sizeof(*dev->tx_q), GFP_KERNEL);
+ if (!dev->tx_q)
+ return -ENOMEM;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i]))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mt76x0_dma_init(struct mt76x0_dev *dev)
+{
+ int ret = -ENOMEM;
+
+ tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev);
+ tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev);
+
+ ret = mt76x0_alloc_tx(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_alloc_rx(dev);
+ if (ret)
+ goto err;
+
+ ret = mt76x0_submit_rx(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ mt76x0_dma_cleanup(dev);
+ return ret;
+}
+
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev)
+{
+ mt76x0_kill_rx(dev);
+
+ tasklet_kill(&dev->rx_tasklet);
+
+ mt76x0_free_rx(dev);
+ mt76x0_free_tx(dev);
+
+ tasklet_kill(&dev->tx_tasklet);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
new file mode 100644
index 000000000000..891ce1c3461f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_DMA_H
+#define __MT76X0U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_SEQ GENMASK(19, 16)
+#define MT_TXD_CMD_TYPE GENMASK(26, 20)
+
+enum mt76_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+enum mt76_info_type {
+ DMA_PACKET,
+ DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
+#define MT_TXD_PKT_INFO_80211 BIT(19)
+#define MT_TXD_PKT_INFO_TSO BIT(20)
+#define MT_TXD_PKT_INFO_CSO BIT(21)
+#define MT_TXD_PKT_INFO_WIV BIT(24)
+#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
+
+enum mt76_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+
+static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
+ enum mt76_msg_port d_port,
+ enum mt76_info_type type, u32 flags)
+{
+ u32 info;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
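+ *
+ * E.g. a 26-byte payload: LEN is rounded up to 28, the 4-byte TXINFO is
+ * pushed in front, 2 bytes of zero padding bring the total to 32 and the
+ * trailing 4-byte zero block makes the final buffer 36 bytes long.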
+ */
+
+ info = flags |
+ FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+ FIELD_PREP(MT_TXD_INFO_TYPE, type);
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+ flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
+ return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR BIT(24)
+#define MT_RXD_INFO_QSEL GENMASK(26, 25)
+#define MT_RXD_INFO_PORT GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
+
+enum mt76_evt_type {
+ CMD_DONE,
+ CMD_ERROR,
+ CMD_RETRY,
+ EVENT_PWR_RSP,
+ EVENT_WOW_RSP,
+ EVENT_CARRIER_DETECT_RSP,
+ EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
new file mode 100644
index 000000000000..36da1e6bc21a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static bool
+field_valid(u8 val)
+{
+ return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+ if (!field_valid(val))
+ return 0;
+
+ return val;
+}
+
+static inline int
+sign_extend(u32 val, unsigned int size)
+{
+ bool sign = val & BIT(size - 1);
+
+ val &= BIT(size - 1) - 1;
+
+ return sign ? val : -val;
+}
+
+static int
+mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data,
+ enum mt76x0_eeprom_access_modes mode)
+{
+ u32 val;
+ int i;
+
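+ /* eFuse data is read 16 bytes at a time: the controller is kicked with
+ * the block address (addr rounded down to a 16-byte boundary) and
+ * returns the block via the four MT_EFUSE_DATA registers.
+ */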
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+ FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
+ MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
+ * will not return valid data but it's ok.
+ */
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+#define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16)
+static int
+mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
+{
+ u8 data[MT_MAP_READS * 16];
+ int ret, i;
+ u32 start = 0, end = 0, cnt_free;
+
+ for (i = 0; i < MT_MAP_READS; i++) {
+ ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+ data + i * 16, MT_EE_PHYSICAL_READ);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+ if (!data[i]) {
+ if (!start)
+ start = MT_EE_USAGE_MAP_START + i;
+ end = MT_EE_USAGE_MAP_START + i;
+ }
+ cnt_free = end - start + 1;
+
+ if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+ dev_err(dev->mt76.dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 };
+ u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+ dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1);
+
+ switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) {
+ case BOARD_TYPE_5GHZ:
+ dev->ee->has_5ghz = true;
+ break;
+ case BOARD_TYPE_2GHZ:
+ dev->ee->has_2ghz = true;
+ break;
+ default:
+ dev->ee->has_2ghz = true;
+ dev->ee->has_5ghz = true;
+ break;
+ }
+
+ dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz);
+
+ if (!field_valid(nic_conf1 & 0xff))
+ nic_conf1 &= 0xff00;
+
+ if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+ dev_err(dev->mt76.dev,
+ "Error: this driver does not support HW RF ctrl\n");
+
+ if (!field_valid(nic_conf0 >> 8))
+ return;
+
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ dev_err(dev->mt76.dev,
+ "Error: device has more than 1 RX/TX stream!\n");
+
+ dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0);
+ dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type);
+}
+
+static int
+mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom)
+{
+ const void *src = eeprom + MT_EE_MAC_ADDR;
+
+ ether_addr_copy(dev->macaddr, src);
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+ return 0;
+}
+
+static void
+mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 temp = eeprom[MT_EE_TEMP_OFFSET];
+
+ if (field_valid(temp))
+ dev->ee->temp_off = sign_extend(temp, 8);
+ else
+ dev->ee->temp_off = -10;
+}
+
+static void
+mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ /* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c)
+ * - comments in rtmp_def.h are incorrect (see rt_channel.c)
+ */
+ static const struct reg_channel_bounds chan_bounds[] = {
+ /* EEPROM country regions 0 - 7 */
+ { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
+ { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
+ /* EEPROM country regions 32 - 33 */
+ { 1, 11 }, { 1, 14 }
+ };
+ u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ];
+ int idx = -1;
+
+ dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]);
+ if (val < 8)
+ idx = val;
+ if (val > 31 && val < 33)
+ idx = val - 32 + 8;
+
+ if (idx != -1)
+ dev_info(dev->mt76.dev,
+ "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+ val, chan_bounds[idx].start,
+ chan_bounds[idx].start + chan_bounds[idx].num - 1);
+ else
+ idx = 5; /* channels 1 - 14 */
+
+ dev->ee->reg = chan_bounds[idx];
+
+ /* TODO: country region 33 is special - phy should be set to B-mode
+ * before entering channel 14 (see sta/connect.c)
+ */
+}
+
+static void
+mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 comp;
+
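+ /* The compensation byte is sign-magnitude encoded: bit 7 selects
+ * subtraction, bits 6:0 hold the magnitude.
+ */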
+ dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+ comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
+ if (comp & BIT(7))
+ dev->ee->rf_freq_off -= comp & 0x7f;
+ else
+ dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 gain;
+
+ dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ];
+ dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0];
+
+ gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1];
+ if (gain == 0xff || gain == 0)
+ dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0];
+ else
+ dev->ee->lna_gain_5ghz[1] = gain;
+
+ gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2];
+ if (gain == 0xff || gain == 0)
+ dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0];
+ else
+ dev->ee->lna_gain_5ghz[2] = gain;
+}
+
+static void
+mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ int i;
+ s8 *rssi_offset = dev->ee->rssi_offset_2ghz;
+
+ for (i = 0; i < 2; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->mt76.dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+
+ rssi_offset = dev->ee->rssi_offset_5ghz;
+
+ for (i = 0; i < 3; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->mt76.dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+}
+
+static u32
+calc_bw40_power_rate(u32 value, int delta)
+{
+ u32 ret = 0;
+ int i, tmp;
+
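+ /* The 32-bit word packs four s6 per-rate power values, one per byte;
+ * add the BW40 delta to each and rely on int_to_s6() to clamp the
+ * result back into the valid s6 range.
+ */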
+ for (i = 0; i < 4; i++) {
+ tmp = s6_to_int((value >> i*8) & 0xff) + delta;
+ ret |= (u32)(int_to_s6(tmp)) << i*8;
+ }
+
+ return ret;
+}
+
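+/* Decode a BW40 TX power delta byte: bit 7 enables the delta, bit 6 selects
+ * the sign and bits 4:0 hold the magnitude, which is capped at 8.
+ */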
+static s8
+get_delta(u8 val)
+{
+ s8 ret;
+
+ if (!field_valid(val) || !(val & BIT(7)))
+ return 0;
+
+ ret = val & 0x1f;
+ if (ret > 8)
+ ret = 8;
+ if (val & BIT(6))
+ ret = -ret;
+
+ return ret;
+}
+
+static void
+mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ s8 bw40_delta_2g, bw40_delta_5g;
+ u32 val;
+ int i;
+
+ bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+ bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]);
+
+ for (i = 0; i < 5; i++) {
+ val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+ /* Skip last 16 bits. */
+ if (i == 4)
+ val &= 0x0000ffff;
+
+ dev->ee->tx_pwr_cfg_2g[i][0] = val;
+ dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g);
+ }
+
+ /* Reading per-rate TX power for the 5 GHz band is a bit more complex. Note
+ * we mix 16-bit and 32-bit reads and sometimes do shifts.
+ */
+ val = get_unaligned_le16(eeprom + 0x120);
+ val <<= 16;
+ dev->ee->tx_pwr_cfg_5g[0][0] = val;
+ dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le32(eeprom + 0x122);
+ dev->ee->tx_pwr_cfg_5g[1][0] = val;
+ dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0x126);
+ dev->ee->tx_pwr_cfg_5g[2][0] = val;
+ dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0xec);
+ val <<= 16;
+ dev->ee->tx_pwr_cfg_5g[3][0] = val;
+ dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0xee);
+ dev->ee->tx_pwr_cfg_5g[4][0] = val;
+ dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+}
+
+static void
+mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ int i;
+ u8 tx_pwr;
+
+ for (i = 0; i < 14; i++) {
+ tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i];
+ if (tx_pwr <= 0x3f && tx_pwr > 0)
+ dev->ee->tx_pwr_per_chan[i] = tx_pwr;
+ else
+ dev->ee->tx_pwr_per_chan[i] = 5;
+ }
+
+ for (i = 0; i < 40; i++) {
+ tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i];
+ if (tx_pwr <= 0x3f && tx_pwr > 0)
+ dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr;
+ else
+ dev->ee->tx_pwr_per_chan[14 + i] = 5;
+ }
+
+ dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22];
+ dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28];
+ dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34];
+ dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44];
+}
+
+int
+mt76x0_eeprom_init(struct mt76x0_dev *dev)
+{
+ u8 *eeprom;
+ int i, ret;
+
+ ret = mt76x0_efuse_physical_size_check(dev);
+ if (ret)
+ return ret;
+
+ dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL);
+ if (!dev->ee)
+ return -ENOMEM;
+
+ eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) {
+ ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+ if (ret)
+ goto out;
+ }
+
+ if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER)
+ dev_warn(dev->mt76.dev,
+ "Warning: unsupported EEPROM version %02hhx\n",
+ eeprom[MT_EE_VERSION_EE]);
+ dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+ eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+ mt76x0_set_macaddr(dev, eeprom);
+ mt76x0_set_chip_cap(dev, eeprom);
+ mt76x0_set_country_reg(dev, eeprom);
+ mt76x0_set_rf_freq_off(dev, eeprom);
+ mt76x0_set_temp_offset(dev, eeprom);
+ mt76x0_set_lna_gain(dev, eeprom);
+ mt76x0_set_rssi_offset(dev, eeprom);
+ dev->chainmask = 0x0101;
+
+ mt76x0_set_tx_power_per_rate(dev, eeprom);
+ mt76x0_set_tx_power_per_chan(dev, eeprom);
+
+out:
+ kfree(eeprom);
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
new file mode 100644
index 000000000000..e37b573aed7b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_EEPROM_H
+#define __MT76X0U_EEPROM_H
+
+struct mt76x0_dev;
+
+#define MT76X0U_EE_MAX_VER 0x0c
+#define MT76X0_EEPROM_SIZE 512
+
+#define MT76X0U_DEFAULT_TX_POWER 6
+
+enum mt76_eeprom_field {
+ MT_EE_CHIP_ID = 0x00,
+ MT_EE_VERSION_FAE = 0x02,
+ MT_EE_VERSION_EE = 0x03,
+ MT_EE_MAC_ADDR = 0x04,
+ MT_EE_NIC_CONF_0 = 0x34,
+ MT_EE_NIC_CONF_1 = 0x36,
+ MT_EE_COUNTRY_REGION_5GHZ = 0x38,
+ MT_EE_COUNTRY_REGION_2GHZ = 0x39,
+ MT_EE_FREQ_OFFSET = 0x3a,
+ MT_EE_NIC_CONF_2 = 0x42,
+
+ MT_EE_LNA_GAIN_2GHZ = 0x44,
+ MT_EE_LNA_GAIN_5GHZ_0 = 0x45,
+ MT_EE_RSSI_OFFSET = 0x46,
+ MT_EE_RSSI_OFFSET_5GHZ = 0x4a,
+ MT_EE_LNA_GAIN_5GHZ_1 = 0x49,
+ MT_EE_LNA_GAIN_5GHZ_2 = 0x4d,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x50,
+
+ MT_EE_TX_POWER_OFFSET_2GHZ = 0x52,
+
+ MT_EE_TX_TSSI_SLOPE = 0x6e,
+ MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
+ MT_EE_TX_TSSI_OFFSET = 0x76,
+
+ MT_EE_TX_POWER_OFFSET_5GHZ = 0x78,
+
+ MT_EE_TEMP_OFFSET = 0xd1,
+ MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
+ MT_EE_TX_POWER_BYRATE_BASE = 0xde,
+
+ MT_EE_TX_POWER_BYRATE_BASE_5GHZ = 0x120,
+
+ MT_EE_USAGE_MAP_START = 0x1e0,
+ MT_EE_USAGE_MAP_END = 0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
+ (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
+ MT_EE_USAGE_MAP_START + 1)
+
+enum mt76x0_eeprom_access_modes {
+ MT_EE_READ = 0,
+ MT_EE_PHYSICAL_READ = 1,
+};
+
+struct reg_channel_bounds {
+ u8 start;
+ u8 num;
+};
+
+struct mt76x0_eeprom_params {
+ u8 rf_freq_off;
+ s16 temp_off;
+ s8 rssi_offset_2ghz[2];
+ s8 rssi_offset_5ghz[3];
+ s8 lna_gain_2ghz;
+ s8 lna_gain_5ghz[3];
+ u8 pa_type;
+
+ /* TX_PWR_CFG_* values from EEPROM for 20 and 40 MHz bandwidths. */
+ u32 tx_pwr_cfg_2g[5][2];
+ u32 tx_pwr_cfg_5g[5][2];
+
+ u8 tx_pwr_per_chan[58];
+
+ struct reg_channel_bounds reg;
+
+ bool has_2ghz;
+ bool has_5ghz;
+};
+
+int mt76x0_eeprom_init(struct mt76x0_dev *dev);
+
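+/* Helpers for the 6-bit signed values used e.g. by calc_bw40_power_rate():
+ * s6_to_int(0x3f) == -1, int_to_s6(-1) == 0x3f, and int_to_s6() clamps its
+ * argument to the representable range [-0x20, 0x1f].
+ */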
+static inline u32 s6_validate(u32 reg)
+{
+ WARN_ON(reg & ~GENMASK(5, 0));
+ return reg & GENMASK(5, 0);
+}
+
+static inline int s6_to_int(u32 reg)
+{
+ int s6;
+
+ s6 = s6_validate(reg);
+ if (s6 & BIT(5))
+ s6 -= BIT(6);
+
+ return s6;
+}
+
+static inline u32 int_to_s6(int val)
+{
+ if (val < -0x20)
+ return 0x20;
+ if (val > 0x1f)
+ return 0x1f;
+
+ return val & 0x3f;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
new file mode 100644
index 000000000000..7cdb3e740522
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -0,0 +1,720 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+#include "usb.h"
+
+#include "initvals.h"
+
+static void
+mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
+{
+ int i;
+
+ /* Note: we don't turn off WLAN_CLK because that makes the device
+ * not respond properly on the probe path.
+ * In case anyone (PSM?) wants to use this function we can
+ * bring the clock stuff back and fixup the probe path.
+ */
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ if (!enable)
+ return;
+
+ for (i = 200; i; i--) {
+ val = mt76_rr(dev, MT_CMB_CTRL);
+
+ if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+ break;
+
+ udelay(20);
+ }
+
+ /* Note: vendor driver tries to disable/enable wlan here and retry
+ * but the code which does it is so buggy it must have never
+ * triggered, so don't bother.
+ */
+ if (!i)
+ dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n");
+}
+
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
+{
+ u32 val;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (reset) {
+ val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ }
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x0_set_wlan_state(dev, val, enable);
+
+ mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_PBF_SYS_CTRL);
+ val &= ~0x2000;
+ mt76_wr(dev, MT_PBF_SYS_CTRL, val);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ msleep(200);
+}
+
+static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+ val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+ if (dev->in_max_packet == 512)
+ val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ val = mt76_rr(dev, MT_COM_REG0);
+ if (val & 1)
+ dev_dbg(dev->mt76.dev, "MCU not ready\n");
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+#define RANDOM_WRITE(dev, tab) \
+ mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab));
+
+static int mt76x0_init_bbp(struct mt76x0_dev *dev)
+{
+ int ret, i;
+
+ ret = mt76x0_wait_bbp_ready(dev);
+ if (ret)
+ return ret;
+
+ RANDOM_WRITE(dev, mt76x0_bbp_init_tab);
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
+ if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+
+ RANDOM_WRITE(dev, mt76x0_dcoc_tab);
+
+ return 0;
+}
+
+static void
+mt76_init_beacon_offsets(struct mt76x0_dev *dev)
+{
+ u16 base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
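+ /* Each MT_BCN_OFFSET register packs four beacon offsets, one per byte,
+ * expressed in 64-byte units relative to MT_BEACON_BASE.
+ */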
+ for (i = 0; i < 16; i++) {
+ u16 addr = dev->beacon_offsets[i];
+
+ regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
+{
+ u32 reg;
+
+ RANDOM_WRITE(dev, common_mac_reg_table);
+
+ mt76_init_beacon_offsets(dev);
+
+ /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
+ RANDOM_WRITE(dev, mt76x0_mac_reg_table);
+
+ /* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
+ reg = mt76_rr(dev, MT_MAC_SYS_CTRL);
+ reg &= ~0x3;
+ mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
+
+ if (is_mt7610e(dev)) {
+ /* Disable COEX_EN */
+ reg = mt76_rr(dev, MT_COEXCFG0);
+ reg &= 0xFFFFFFFE;
+ mt76_wr(dev, MT_COEXCFG0, reg);
+ }
+
+ /* Set 0x141C[15:12]=0xF */
+ reg = mt76_rr(dev, MT_EXT_CCA_CFG);
+ reg |= 0x0000F000;
+ mt76_wr(dev, MT_EXT_CCA_CFG, reg);
+
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ /* TxRing 9 is for Mgmt frames.
+ * TxRing 8 is for in-band command frames.
+ * WMM_RG0_TXQMA: register setting for FCE to define the rule of TxRing 9.
+ * WMM_RG1_TXQMA: register setting for FCE to define the rule of TxRing 8.
+ */
+ reg = mt76_rr(dev, MT_WMM_CTRL);
+ reg &= ~0x000003FF;
+ reg |= 0x00000201;
+ mt76_wr(dev, MT_WMM_CTRL, reg);
+
+ /* TODO: Probably not needed */
+ mt76_wr(dev, 0x7028, 0);
+ mt76_wr(dev, 0x7010, 0);
+ mt76_wr(dev, 0x7024, 0);
+ msleep(10);
+}
+
+static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
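+ /* Each WCID entry is two 32-bit words; pre-fill every entry with an
+ * all-ones (ff:ff:ff:ff:ff:ff) MAC address.
+ */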
+ for (i = 0; i < N_WCIDS; i++) {
+ vals[i * 2] = 0xffffffff;
+ vals[i * 2 + 1] = 0x00ffffff;
+ }
+
+ ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static int mt76x0_init_key_mem(struct mt76x0_dev *dev)
+{
+ u32 vals[4] = {};
+
+ return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+ vals, ARRAY_SIZE(vals));
+}
+
+static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS * 2; i++)
+ vals[i] = 1;
+
+ ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static void mt76x0_reset_counters(struct mt76x0_dev *dev)
+{
+ mt76_rr(dev, MT_RX_STA_CNT0);
+ mt76_rr(dev, MT_RX_STA_CNT1);
+ mt76_rr(dev, MT_RX_STA_CNT2);
+ mt76_rr(dev, MT_TX_STA_CNT0);
+ mt76_rr(dev, MT_TX_STA_CNT1);
+ mt76_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt76x0_mac_start(struct mt76x0_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+ return -ETIMEDOUT;
+
+ dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+ MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+ MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+ MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+ MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
+{
+ int i, ok;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n");
+
+ /* Page count on TxQ */
+ i = 200;
+ while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+ (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+ msleep(10);
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC TX did not stop!\n");
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Page count on RxQ */
+ ok = 0;
+ i = 200;
+ while (i--) {
+ if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+ !mt76_rr(dev, 0x0a30) &&
+ !mt76_rr(dev, 0x0a34)) {
+ if (ok++ > 5)
+ break;
+ continue;
+ }
+ msleep(1);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt76x0_mac_stop(struct mt76x0_dev *dev)
+{
+ mt76x0_mac_stop_hw(dev);
+ flush_delayed_work(&dev->stat_work);
+ cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
+{
+ mt76x0_chip_onoff(dev, false, false);
+}
+
+int mt76x0_init_hardware(struct mt76x0_dev *dev)
+{
+ static const u16 beacon_offsets[16] = {
+ /* 512 byte per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ int ret;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt76x0_chip_onoff(dev, true, true);
+
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_mcu_init(dev);
+ if (ret)
+ goto err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Wait for ASIC ready after FW load. */
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ mt76x0_reset_csr_bbp(dev);
+ mt76x0_init_usb_dma(dev);
+
+ mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0);
+ mt76_wr(dev, MT_TSO_CTRL, 0x0);
+
+ ret = mt76x0_mcu_cmd_init(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_dma_init(dev);
+ if (ret)
+ goto err_mcu;
+
+ mt76x0_init_mac_registers(dev);
+
+ if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) {
+ ret = -EIO;
+ goto err_rx;
+ }
+
+ ret = mt76x0_init_bbp(dev);
+ if (ret)
+ goto err_rx;
+
+ ret = mt76x0_init_wcid_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt76x0_init_key_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt76x0_init_wcid_attr_mem(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+
+ mt76x0_reset_counters(dev);
+
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+ mt76_wr(dev, MT_TXOP_CTRL_CFG,
+ FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+ FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+ ret = mt76x0_eeprom_init(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76x0_phy_init(dev);
+ return 0;
+
+err_rx:
+ mt76x0_dma_cleanup(dev);
+err_mcu:
+ mt76x0_mcu_cmd_deinit(dev);
+err:
+ mt76x0_chip_onoff(dev, false, false);
+ return ret;
+}
+
+void mt76x0_cleanup(struct mt76x0_dev *dev)
+{
+ if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return;
+
+ mt76x0_stop_hardware(dev);
+ mt76x0_dma_cleanup(dev);
+ mt76x0_mcu_cmd_deinit(dev);
+}
+
+struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct mt76x0_dev *dev;
+
+ hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->mt76.dev = pdev;
+ dev->mt76.hw = hw;
+ mutex_init(&dev->usb_ctrl_mtx);
+ mutex_init(&dev->reg_atomic_mutex);
+ mutex_init(&dev->hw_atomic_mutex);
+ mutex_init(&dev->mutex);
+ spin_lock_init(&dev->tx_lock);
+ spin_lock_init(&dev->rx_lock);
+ spin_lock_init(&dev->mt76.lock);
+ spin_lock_init(&dev->mac_lock);
+ spin_lock_init(&dev->con_mon_lock);
+ atomic_set(&dev->avg_ampdu_len, 1);
+ skb_queue_head_init(&dev->tx_skb_done);
+
+ dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0);
+ if (!dev->stat_wq) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ return dev;
+}
+
+#define CHAN2G(_idx, _freq) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+#define CHAN5G(_idx, _freq) { \
+ .band = NL80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+ CHAN5G(36, 5180),
+ CHAN5G(40, 5200),
+ CHAN5G(44, 5220),
+ CHAN5G(46, 5230),
+ CHAN5G(48, 5240),
+ CHAN5G(52, 5260),
+ CHAN5G(56, 5280),
+ CHAN5G(60, 5300),
+ CHAN5G(64, 5320),
+
+ CHAN5G(100, 5500),
+ CHAN5G(104, 5520),
+ CHAN5G(108, 5540),
+ CHAN5G(112, 5560),
+ CHAN5G(116, 5580),
+ CHAN5G(120, 5600),
+ CHAN5G(124, 5620),
+ CHAN5G(128, 5640),
+ CHAN5G(132, 5660),
+ CHAN5G(136, 5680),
+ CHAN5G(140, 5700),
+};
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ void *chanlist;
+ int size;
+
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
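+ /* Single RX stream: MCS 0-7, plus MCS 32 (rx_mask[4] bit 0). */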
+ ht_cap->mcs.rx_mask[0] = 0xff;
+ ht_cap->mcs.rx_mask[4] = 0x1;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76x0_dev *dev)
+{
+ dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband;
+
+ WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+ ARRAY_SIZE(mt76_channels_2ghz));
+
+
+ return mt76_init_sband(dev, &dev->mt76.sband_2g.sband,
+ mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz),
+ mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+static int
+mt76_init_sband_5g(struct mt76x0_dev *dev)
+{
+ dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband;
+
+ return mt76_init_sband(dev, &dev->mt76.sband_5g.sband,
+ mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz),
+ mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4);
+}
+
+
+int mt76x0_register_device(struct mt76x0_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->mt76.hw;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ /* Reserve WCID 0 for mcast - thanks to this the AP's WCID will go to
+ * entry no. 1, like it does in the vendor driver.
+ */
+ dev->wcid_mask[0] |= 1;
+
+ /* init fake wcid for monitor interfaces */
+ dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid),
+ GFP_KERNEL);
+ if (!dev->mon_wcid)
+ return -ENOMEM;
+ dev->mon_wcid->idx = 0xff;
+ dev->mon_wcid->hw_key_idx = -1;
+
+ SET_IEEE80211_DEV(hw, dev->mt76.dev);
+
+ hw->queues = 4;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+
+ hw->sta_data_size = sizeof(struct mt76_sta);
+ hw->vif_data_size = sizeof(struct mt76_vif);
+
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ if (dev->ee->has_2ghz) {
+ ret = mt76_init_sband_2g(dev);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->ee->has_5ghz) {
+ ret = mt76_init_sband_5g(dev);
+ if (ret)
+ return ret;
+ }
+
+ dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0];
+
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
+ INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ mt76x0_init_debugfs(dev);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
new file mode 100644
index 000000000000..24afcfd94b4e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -0,0 +1,282 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_INITVALS_H
+#define __MT76X0U_INITVALS_H
+
+#include "phy.h"
+
+static const struct mt76_reg_pair common_mac_reg_table[] = {
+#if 1
+ {MT_BCN_OFFSET(0), 0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */
+ {MT_BCN_OFFSET(1), 0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */
+#endif
+
+ {MT_LEGACY_BASIC_RATE, 0x0000013f}, /* Basic rate set bitmap*/
+ {MT_HT_BASIC_RATE, 0x00008003}, /* Basic HT rate set , 20M, MCS=3, MM. Format is the same as in TXWI.*/
+ {MT_MAC_SYS_CTRL, 0x00}, /* 0x1004, default: disable RX */
+ {MT_RX_FILTR_CFG, 0x17f97}, /* 0x1400, RX filter control */
+ {MT_BKOFF_SLOT_CFG, 0x209}, /* default set short slot time, CC_DELAY_TIME should be 2 */
+ /*{TX_SW_CFG0, 0x40a06}, Gary,2006-08-23 */
+ {MT_TX_SW_CFG0, 0x0}, /* Gary,2008-05-21 for CWC test */
+ {MT_TX_SW_CFG1, 0x80606}, /* Gary,2006-08-23 */
+ {MT_TX_LINK_CFG, 0x1020}, /* Gary,2006-08-23 */
+ /*{TX_TIMEOUT_CFG, 0x00182090}, CCK has some problem. So increase timeout value. 2006-10-09 MArvek RT*/
+ {MT_TX_TIMEOUT_CFG, 0x000a2090}, /* CCK has some problem. So increase timeout value. 2006-10-09 MArvek RT, modify for 2860E, 2007-08-01 */
+ {MT_MAX_LEN_CFG, 0xa0fff | 0x00001000}, /* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/
+ {MT_LED_CFG, 0x7f031e46}, /* Gary, 2006-08-23*/
+
+ {MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f /*0xbfbf3f1f*/},
+ {MT_PBF_RX_MAX_PCNT, 0x9f},
+
+ /*{TX_RTY_CFG, 0x6bb80408}, Jan, 2006/11/16*/
+/* WMM_ACM_SUPPORT */
+/* {TX_RTY_CFG, 0x6bb80101}, sample*/
+ {MT_TX_RETRY_CFG, 0x47d01f0f}, /* Jan, 2006/11/16, Set TxWI->ACK =0 in Probe Rsp Modify for 2860E ,2007-08-03*/
+
+ {MT_AUTO_RSP_CFG, 0x00000013}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
+ {MT_CCK_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
+ {MT_OFDM_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
+ {MT_PBF_CFG, 0xf40006}, /* Only enable Queue 2*/
+ {MT_MM40_PROT_CFG, 0x3F44084}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
+ {MT_WPDMA_GLO_CFG, 0x00000030},
+ {MT_GF20_PROT_CFG, 0x01744004}, /* set 19:18 --> Short NAV for MIMO PS*/
+ {MT_GF40_PROT_CFG, 0x03F44084},
+ {MT_MM20_PROT_CFG, 0x01744004},
+ {MT_TXOP_CTRL_CFG, 0x0000583f, /*0x0000243f*/ /*0x000024bf*/}, /*Extension channel backoff.*/
+ {MT_TX_RTS_CFG, 0x00092b20},
+
+ {MT_EXP_ACK_TIME, 0x002400ca}, /* default value */
+ {MT_TXOP_HLDR_ET, 0x00000002},
+
+ /* Jerry comments 2008/01/16: we use SIFS = 10us for CCK by default, but
+ * 10us seems too small for the Intel 2200bg card. In MBSS mode the delta
+ * time between beacon0 and beacon1 is SIFS (10us), so if an Intel 2200bg
+ * card connects to BSS0 pings are always lost. So we change the CCK SIFS
+ * from 10us to 16us.
+ */
+ {MT_XIFS_TIME_CFG, 0x33a41010},
+ {MT_PWR_PIN_CFG, 0x00000000},
+};
+
+static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
+ /* {MT_IOCFG_6, 0xA0040080 }, */
+ {MT_PBF_SYS_CTRL, 0x00080c00 },
+ {MT_PBF_CFG, 0x77723c1f },
+ {MT_FCE_PSE_CTRL, 0x00000001 },
+
+ {MT_AMPDU_MAX_LEN_20M1S, 0xBAA99887 },
+
+ /* Delay bb_tx_pe for proper tx_mcs_pwr update */
+ {MT_TX_SW_CFG0, 0x00000601 },
+
+ /* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */
+ {MT_TX_SW_CFG1, 0x00040000 },
+ {MT_TX_SW_CFG2, 0x00000000 },
+
+ /* disable Tx info report */
+ {0xa44, 0x0000000 },
+
+ {MT_HEADER_TRANS_CTRL_REG, 0x0},
+ {MT_TSO_CTRL, 0x0},
+
+ /* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */
+ {MT_BB_PA_MODE_CFG1, 0x00500055},
+
+ /* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */
+ {MT_RF_PA_MODE_CFG1, 0x00500055},
+
+ {MT_TX_ALC_CFG_0, 0x2F2F000C},
+ {MT_TX0_BB_GAIN_ATTEN, 0x00000000}, /* set BBP atten gain = 0 */
+
+ {MT_TX_PWR_CFG_0, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_1, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_2, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_3, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_4, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_7, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_8, 0x3A},
+ {MT_TX_PWR_CFG_9, 0x3A},
+ /* Enable Tx length > 4095 byte */
+ {0x150C, 0x00000002},
+
+ /* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */
+ {0x1238, 0x001700C8},
+ /* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */
+ /* {MT_LDO_CTRL_0, 0x00A647B6}, */
+
+ /* Default LDO_DIG supply 1.26V, change to 1.2V */
+ {MT_LDO_CTRL_1, 0x6B006464 },
+/*
+ {MT_HT_BASIC_RATE, 0x00004003 },
+ {MT_HT_CTRL_CFG, 0x000001FF },
+*/
+};
+
+
+static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
+ {MT_BBP(CORE, 1), 0x00000002},
+ {MT_BBP(CORE, 4), 0x00000000},
+ {MT_BBP(CORE, 24), 0x00000000},
+ {MT_BBP(CORE, 32), 0x4003000a},
+ {MT_BBP(CORE, 42), 0x00000000},
+ {MT_BBP(CORE, 44), 0x00000000},
+
+ {MT_BBP(IBI, 11), 0x00000080},
+
+ /* 0x2300[5] Default Antenna:
+ * 0 for WIFI main antenna
+ * 1 for WIFI aux antenna
+ */
+ {MT_BBP(AGC, 0), 0x00021400},
+ {MT_BBP(AGC, 1), 0x00000003},
+ {MT_BBP(AGC, 2), 0x003A6464},
+ {MT_BBP(AGC, 15), 0x88A28CB8},
+ {MT_BBP(AGC, 22), 0x00001E21},
+ {MT_BBP(AGC, 23), 0x0000272C},
+ {MT_BBP(AGC, 24), 0x00002F3A},
+ {MT_BBP(AGC, 25), 0x8000005A},
+ {MT_BBP(AGC, 26), 0x007C2005},
+ {MT_BBP(AGC, 34), 0x000A0C0C},
+ {MT_BBP(AGC, 37), 0x2121262C},
+ {MT_BBP(AGC, 41), 0x38383E45},
+ {MT_BBP(AGC, 57), 0x00001010},
+ {MT_BBP(AGC, 59), 0xBAA20E96},
+ {MT_BBP(AGC, 63), 0x00000001},
+
+ {MT_BBP(TXC, 0), 0x00280403},
+ {MT_BBP(TXC, 1), 0x00000000},
+
+ {MT_BBP(RXC, 1), 0x00000012},
+ {MT_BBP(RXC, 2), 0x00000011},
+ {MT_BBP(RXC, 3), 0x00000005},
+ {MT_BBP(RXC, 4), 0x00000000},
+ {MT_BBP(RXC, 5), 0xF977C4EC},
+ {MT_BBP(RXC, 7), 0x00000090},
+
+ {MT_BBP(TXO, 8), 0x00000000},
+
+ {MT_BBP(TXBE, 0), 0x00000000},
+ {MT_BBP(TXBE, 4), 0x00000004},
+ {MT_BBP(TXBE, 6), 0x00000000},
+ {MT_BBP(TXBE, 8), 0x00000014},
+ {MT_BBP(TXBE, 9), 0x20000000},
+ {MT_BBP(TXBE, 10), 0x00000000},
+ {MT_BBP(TXBE, 12), 0x00000000},
+ {MT_BBP(TXBE, 13), 0x00000000},
+ {MT_BBP(TXBE, 14), 0x00000000},
+ {MT_BBP(TXBE, 15), 0x00000000},
+ {MT_BBP(TXBE, 16), 0x00000000},
+ {MT_BBP(TXBE, 17), 0x00000000},
+
+ {MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */
+ {MT_BBP(RXFE, 3), 0x00000000},
+ {MT_BBP(RXFE, 4), 0x00000000},
+
+ {MT_BBP(RXO, 13), 0x00000092},
+ {MT_BBP(RXO, 14), 0x00060612},
+ {MT_BBP(RXO, 15), 0xC8321B18},
+ {MT_BBP(RXO, 16), 0x0000001E},
+ {MT_BBP(RXO, 17), 0x00000000},
+ {MT_BBP(RXO, 18), 0xCC00A993},
+ {MT_BBP(RXO, 19), 0xB9CB9CB9},
+ {MT_BBP(RXO, 20), 0x26c00057},
+ {MT_BBP(RXO, 21), 0x00000001},
+ {MT_BBP(RXO, 24), 0x00000006},
+};
+
+static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 8), 0x0E344EF0}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 8), 0x122C54F2}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 14), 0x310F2E39}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 14), 0x310F2A3F}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 32), 0x00003230}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 32), 0x0000181C}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 33), 0x00003240}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 33), 0x00003218}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 35), 0x11112016}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 35), 0x11112016}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXO, 28), 0x0000008A}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXO, 28), 0x0000008A}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 4), 0x1FEDA049}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 4), 0x1FECA054}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 6), 0x00000045}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 6), 0x0000000A}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 12), 0x05052879}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 12), 0x050528F9}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 12), 0x050528F9}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 13), 0x35050004}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 13), 0x2C3A0406}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 27), 0x000000E1}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 27), 0x000000EC}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 28), 0x00060806}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00050806}},
+ {RF_A_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00060801}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_80, {MT_BBP(AGC, 28), 0x00060806}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 31), 0x00000F23}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 31), 0x00000F13}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 39), 0x2A2A3036}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A2C36}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A3036}},
+ {RF_A_BAND | RF_BW_80, {MT_BBP(AGC, 39), 0x2A2A2A36}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 43), 0x27273438}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 43), 0x27272D38}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 43), 0x27272B30}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 51), 0x17171C1C}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 51), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 53), 0x26262A2F}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 53), 0x2626322F}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 53), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 55), 0x40404E58}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 55), 0x40405858}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 55), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 58), 0x00001010}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 58), 0x00000000}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXFE, 0), 0x3D5000E0}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXFE, 0), 0x895000E0}},
+};
+
+static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
+ {MT_BBP(CAL, 47), 0x000010F0 },
+ {MT_BBP(CAL, 48), 0x00008080 },
+ {MT_BBP(CAL, 49), 0x00000F07 },
+ {MT_BBP(CAL, 50), 0x00000040 },
+ {MT_BBP(CAL, 51), 0x00000404 },
+ {MT_BBP(CAL, 52), 0x00080803 },
+ {MT_BBP(CAL, 53), 0x00000704 },
+ {MT_BBP(CAL, 54), 0x00002828 },
+ {MT_BBP(CAL, 55), 0x00005050 },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
new file mode 100644
index 000000000000..95d43efc1f3d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
@@ -0,0 +1,772 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_PHY_INITVALS_H
+#define __MT76X0U_PHY_INITVALS_H
+
+#define RF_REG_PAIR(bank, reg, value) \
+ { (bank) << 16 | (reg), value }
+
+
+static const struct mt76_reg_pair mt76x0_rf_central_tab[] = {
+/* Bank 0 - For central blocks: BG, PLL, XTAL, LO, ADC/DAC */
+ { MT_RF(0, 1), 0x01},
+ { MT_RF(0, 2), 0x11},
+
+ /* R3 ~ R7: VCO Cal. */
+ { MT_RF(0, 3), 0x73}, /* VCO Freq Cal - No Bypass, VCO Amp Cal - No Bypass */
+ { MT_RF(0, 4), 0x30}, /* R4 b<7>=1, VCO cal */
+ { MT_RF(0, 5), 0x00},
+ { MT_RF(0, 6), 0x41}, /* Set the open loop amplitude to middle since bypassing amplitude calibration */
+ { MT_RF(0, 7), 0x00},
+
+ /*
+ XO
+ */
+ { MT_RF(0, 8), 0x00},
+ { MT_RF(0, 9), 0x00},
+ { MT_RF(0, 10), 0x0C},
+ { MT_RF(0, 11), 0x00},
+ { MT_RF(0, 12), 0x00},
+
+ /*
+ BG
+ */
+ { MT_RF(0, 13), 0x00},
+ { MT_RF(0, 14), 0x00},
+ { MT_RF(0, 15), 0x00},
+
+ /*
+ LDO
+ */
+ { MT_RF(0, 19), 0x20},
+ /*
+ XO
+ */
+ { MT_RF(0, 20), 0x22},
+ { MT_RF(0, 21), 0x12},
+ { MT_RF(0, 23), 0x00},
+ { MT_RF(0, 24), 0x33}, /* See band selection for R24<1:0> */
+ { MT_RF(0, 25), 0x00},
+
+ /* PLL, See Freq Selection */
+ { MT_RF(0, 26), 0x00},
+ { MT_RF(0, 27), 0x00},
+ { MT_RF(0, 28), 0x00},
+ { MT_RF(0, 29), 0x00},
+ { MT_RF(0, 30), 0x00},
+ { MT_RF(0, 31), 0x00},
+ { MT_RF(0, 32), 0x00},
+ { MT_RF(0, 33), 0x00},
+ { MT_RF(0, 34), 0x00},
+ { MT_RF(0, 35), 0x00},
+ { MT_RF(0, 36), 0x00},
+ { MT_RF(0, 37), 0x00},
+
+ /*
+ LO Buffer
+ */
+ { MT_RF(0, 38), 0x2F},
+
+ /*
+ Test Ports
+ */
+ { MT_RF(0, 64), 0x00},
+ { MT_RF(0, 65), 0x80},
+ { MT_RF(0, 66), 0x01},
+ { MT_RF(0, 67), 0x04},
+
+ /*
+ ADC/DAC
+ */
+ { MT_RF(0, 68), 0x00},
+ { MT_RF(0, 69), 0x08},
+ { MT_RF(0, 70), 0x08},
+ { MT_RF(0, 71), 0x40},
+ { MT_RF(0, 72), 0xD0},
+ { MT_RF(0, 73), 0x93},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = {
+/* Bank 5 - Channel 0 2G RF registers */
+ /*
+ RX logic operation
+ */
+ /* RF_R00 Change in SelectBand6590 */
+
+ { MT_RF(5, 2), 0x0C}, /* 5G+2G (MT7610U) */
+ { MT_RF(5, 3), 0x00},
+
+ /*
+ TX logic operation
+ */
+ { MT_RF(5, 4), 0x00},
+ { MT_RF(5, 5), 0x84},
+ { MT_RF(5, 6), 0x02},
+
+ /*
+ LDO
+ */
+ { MT_RF(5, 7), 0x00},
+ { MT_RF(5, 8), 0x00},
+ { MT_RF(5, 9), 0x00},
+
+ /*
+ RX
+ */
+ { MT_RF(5, 10), 0x51},
+ { MT_RF(5, 11), 0x22},
+ { MT_RF(5, 12), 0x22},
+ { MT_RF(5, 13), 0x0F},
+ { MT_RF(5, 14), 0x47}, /* Increase mixer current for more gain */
+ { MT_RF(5, 15), 0x25},
+ { MT_RF(5, 16), 0xC7}, /* Tune LNA2 tank */
+ { MT_RF(5, 17), 0x00},
+ { MT_RF(5, 18), 0x00},
+ { MT_RF(5, 19), 0x30}, /* Improve max Pin */
+ { MT_RF(5, 20), 0x33},
+ { MT_RF(5, 21), 0x02},
+ { MT_RF(5, 22), 0x32}, /* Tune LNA1 tank */
+ { MT_RF(5, 23), 0x00},
+ { MT_RF(5, 24), 0x25},
+ { MT_RF(5, 26), 0x00},
+ { MT_RF(5, 27), 0x12},
+ { MT_RF(5, 28), 0x0F},
+ { MT_RF(5, 29), 0x00},
+
+ /*
+ LOGEN
+ */
+ { MT_RF(5, 30), 0x51}, /* Tune LOGEN tank */
+ { MT_RF(5, 31), 0x35},
+ { MT_RF(5, 32), 0x31},
+ { MT_RF(5, 33), 0x31},
+ { MT_RF(5, 34), 0x34},
+ { MT_RF(5, 35), 0x03},
+ { MT_RF(5, 36), 0x00},
+
+ /*
+ TX
+ */
+ { MT_RF(5, 37), 0xDD}, /* Improve 3.2GHz spur */
+ { MT_RF(5, 38), 0xB3},
+ { MT_RF(5, 39), 0x33},
+ { MT_RF(5, 40), 0xB1},
+ { MT_RF(5, 41), 0x71},
+ { MT_RF(5, 42), 0xF2},
+ { MT_RF(5, 43), 0x47},
+ { MT_RF(5, 44), 0x77},
+ { MT_RF(5, 45), 0x0E},
+ { MT_RF(5, 46), 0x10},
+ { MT_RF(5, 47), 0x00},
+ { MT_RF(5, 48), 0x53},
+ { MT_RF(5, 49), 0x03},
+ { MT_RF(5, 50), 0xEF},
+ { MT_RF(5, 51), 0xC7},
+ { MT_RF(5, 52), 0x62},
+ { MT_RF(5, 53), 0x62},
+ { MT_RF(5, 54), 0x00},
+ { MT_RF(5, 55), 0x00},
+ { MT_RF(5, 56), 0x0F},
+ { MT_RF(5, 57), 0x0F},
+ { MT_RF(5, 58), 0x16},
+ { MT_RF(5, 59), 0x16},
+ { MT_RF(5, 60), 0x10},
+ { MT_RF(5, 61), 0x10},
+ { MT_RF(5, 62), 0xD0},
+ { MT_RF(5, 63), 0x6C},
+ { MT_RF(5, 64), 0x58},
+ { MT_RF(5, 65), 0x58},
+ { MT_RF(5, 66), 0xF2},
+ { MT_RF(5, 67), 0xE8},
+ { MT_RF(5, 68), 0xF0},
+ { MT_RF(5, 69), 0xF0},
+ { MT_RF(5, 127), 0x04},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = {
+/* Bank 6 - Channel 0 5G RF registers */
+ /*
+ RX logic operation
+ */
+ /* RF_R00 Change in SelectBandmt76x0 */
+
+ { MT_RF(6, 2), 0x0C},
+ { MT_RF(6, 3), 0x00},
+
+ /*
+ TX logic operation
+ */
+ { MT_RF(6, 4), 0x00},
+ { MT_RF(6, 5), 0x84},
+ { MT_RF(6, 6), 0x02},
+
+ /*
+ LDO
+ */
+ { MT_RF(6, 7), 0x00},
+ { MT_RF(6, 8), 0x00},
+ { MT_RF(6, 9), 0x00},
+
+ /*
+ RX
+ */
+ { MT_RF(6, 10), 0x00},
+ { MT_RF(6, 11), 0x01},
+
+ { MT_RF(6, 13), 0x23},
+ { MT_RF(6, 14), 0x00},
+ { MT_RF(6, 15), 0x04},
+ { MT_RF(6, 16), 0x22},
+
+ { MT_RF(6, 18), 0x08},
+ { MT_RF(6, 19), 0x00},
+ { MT_RF(6, 20), 0x00},
+ { MT_RF(6, 21), 0x00},
+ { MT_RF(6, 22), 0xFB},
+
+ /*
+ LOGEN5G
+ */
+ { MT_RF(6, 25), 0x76},
+ { MT_RF(6, 26), 0x24},
+ { MT_RF(6, 27), 0x04},
+ { MT_RF(6, 28), 0x00},
+ { MT_RF(6, 29), 0x00},
+
+ /*
+ TX
+ */
+ { MT_RF(6, 37), 0xBB},
+ { MT_RF(6, 38), 0xB3},
+
+ { MT_RF(6, 40), 0x33},
+ { MT_RF(6, 41), 0x33},
+
+ { MT_RF(6, 43), 0x03},
+ { MT_RF(6, 44), 0xB3},
+
+ { MT_RF(6, 46), 0x17},
+ { MT_RF(6, 47), 0x0E},
+ { MT_RF(6, 48), 0x10},
+ { MT_RF(6, 49), 0x07},
+
+ { MT_RF(6, 62), 0x00},
+ { MT_RF(6, 63), 0x00},
+ { MT_RF(6, 64), 0xF1},
+ { MT_RF(6, 65), 0x0F},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = {
+/* Bank 7 - Channel 0 VGA RF registers */
+ /* E3 CR */
+ { MT_RF(7, 0), 0x47}, /* Allow BBP/MAC to do calibration */
+ { MT_RF(7, 1), 0x00},
+ { MT_RF(7, 2), 0x00},
+ { MT_RF(7, 3), 0x00},
+ { MT_RF(7, 4), 0x00},
+
+ { MT_RF(7, 10), 0x13},
+ { MT_RF(7, 11), 0x0F},
+ { MT_RF(7, 12), 0x13}, /* For dcoc */
+ { MT_RF(7, 13), 0x13}, /* For dcoc */
+ { MT_RF(7, 14), 0x13}, /* For dcoc */
+ { MT_RF(7, 15), 0x20}, /* For dcoc */
+ { MT_RF(7, 16), 0x22}, /* For dcoc */
+
+ { MT_RF(7, 17), 0x7C},
+
+ { MT_RF(7, 18), 0x00},
+ { MT_RF(7, 19), 0x00},
+ { MT_RF(7, 20), 0x00},
+ { MT_RF(7, 21), 0xF1},
+ { MT_RF(7, 22), 0x11},
+ { MT_RF(7, 23), 0xC2},
+ { MT_RF(7, 24), 0x41},
+ { MT_RF(7, 25), 0x20},
+ { MT_RF(7, 26), 0x40},
+ { MT_RF(7, 27), 0xD7},
+ { MT_RF(7, 28), 0xA2},
+ { MT_RF(7, 29), 0x60},
+ { MT_RF(7, 30), 0x49},
+ { MT_RF(7, 31), 0x20},
+ { MT_RF(7, 32), 0x44},
+ { MT_RF(7, 33), 0xC1},
+ { MT_RF(7, 34), 0x60},
+ { MT_RF(7, 35), 0xC0},
+
+ { MT_RF(7, 61), 0x01},
+
+ { MT_RF(7, 72), 0x3C},
+ { MT_RF(7, 73), 0x34},
+ { MT_RF(7, 74), 0x00},
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = {
+ /* Bank, Register, Bw/Band, Value */
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00},
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00},
+
+ /* TODO: need to check B7.R6 & B7.R7 setting for 2.4G again @20121112 */
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03},
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00},
+
+ /* TODO: need to check B7.R58 & B7.R59 setting for 2.4G again @20121112 */
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA},
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA},
+
+ { MT_RF(7, 76), RF_BW_20, 0x40},
+ { MT_RF(7, 76), RF_BW_40, 0x40},
+ { MT_RF(7, 76), RF_BW_80, 0x10},
+
+ { MT_RF(7, 77), RF_BW_20, 0x40},
+ { MT_RF(7, 77), RF_BW_40, 0x40},
+ { MT_RF(7, 77), RF_BW_80, 0x10},
+};
+
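+/*
+ * Per-band RF overrides, keyed by G/A band; the LB/MB/HB/11J flags further
+ * partition the 5 GHz range into sub-bands.
+ */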
+static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = {
+ /* Bank, Register, Bw/Band, Value */
+ { MT_RF(0, 16), RF_G_BAND, 0x20},
+ { MT_RF(0, 16), RF_A_BAND, 0x20},
+
+ { MT_RF(0, 18), RF_G_BAND, 0x00},
+ { MT_RF(0, 18), RF_A_BAND, 0x00},
+
+ { MT_RF(0, 39), RF_G_BAND, 0x36},
+ { MT_RF(0, 39), RF_A_BAND_LB, 0x34},
+ { MT_RF(0, 39), RF_A_BAND_MB, 0x33},
+ { MT_RF(0, 39), RF_A_BAND_HB, 0x31},
+ { MT_RF(0, 39), RF_A_BAND_11J, 0x36},
+
+ { MT_RF(6, 12), RF_A_BAND_LB, 0x44},
+ { MT_RF(6, 12), RF_A_BAND_MB, 0x44},
+ { MT_RF(6, 12), RF_A_BAND_HB, 0x55},
+ { MT_RF(6, 12), RF_A_BAND_11J, 0x44},
+
+ { MT_RF(6, 17), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 17), RF_A_BAND_MB, 0x00},
+ { MT_RF(6, 17), RF_A_BAND_HB, 0x00},
+ { MT_RF(6, 17), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 24), RF_A_BAND_LB, 0xA1},
+ { MT_RF(6, 24), RF_A_BAND_MB, 0x41},
+ { MT_RF(6, 24), RF_A_BAND_HB, 0x21},
+ { MT_RF(6, 24), RF_A_BAND_11J, 0xE1},
+
+ { MT_RF(6, 39), RF_A_BAND_LB, 0x36},
+ { MT_RF(6, 39), RF_A_BAND_MB, 0x34},
+ { MT_RF(6, 39), RF_A_BAND_HB, 0x32},
+ { MT_RF(6, 39), RF_A_BAND_11J, 0x37},
+
+ { MT_RF(6, 42), RF_A_BAND_LB, 0xFB},
+ { MT_RF(6, 42), RF_A_BAND_MB, 0xF3},
+ { MT_RF(6, 42), RF_A_BAND_HB, 0xEB},
+ { MT_RF(6, 42), RF_A_BAND_11J, 0xEB},
+
+ /* R6-R45 and R50~R59 moved to the internal/external PA 5G channel 0 tables */
+
+ { MT_RF(6, 127), RF_G_BAND, 0x84},
+ { MT_RF(6, 127), RF_A_BAND, 0x04},
+
+ { MT_RF(7, 5), RF_G_BAND, 0x40},
+ { MT_RF(7, 5), RF_A_BAND, 0x00},
+
+ { MT_RF(7, 9), RF_G_BAND, 0x00},
+ { MT_RF(7, 9), RF_A_BAND, 0x00},
+
+ { MT_RF(7, 70), RF_G_BAND, 0x00},
+ { MT_RF(7, 70), RF_A_BAND, 0x6D},
+
+ { MT_RF(7, 71), RF_G_BAND, 0x00},
+ { MT_RF(7, 71), RF_A_BAND, 0xB0},
+
+ { MT_RF(7, 78), RF_G_BAND, 0x00},
+ { MT_RF(7, 78), RF_A_BAND, 0x55},
+
+ { MT_RF(7, 79), RF_G_BAND, 0x00},
+ { MT_RF(7, 79), RF_A_BAND, 0x55},
+};
+
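+/*
+ * Per-channel PLL/RF programming values, one row per channel, keyed by
+ * channel number and band/sub-band flags. Channels listed in
+ * mt76x0_sdm_channel further below presumably use the fractional (SDM)
+ * plan that follows instead.
+ */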
+static const struct mt76x0_freq_item mt76x0_frequency_plan[] = {
+ {1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2412 */
+ {2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1}, /* Freq 2417 */
+ {3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2422 */
+ {4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2427 */
+ {5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2432 */
+ {6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2437 */
+ {7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2442 */
+ {8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1}, /* Freq 2447 */
+ {9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2452 */
+ {10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0}, /* Freq 2457 */
+ {11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2462 */
+ {12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2467 */
+ {13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2472 */
+ {14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2484 */
+
+ {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 4915 */
+ {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4920 */
+ {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4925 */
+ {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4935 */
+ {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4940 */
+ {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4945 */
+ {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4960 */
+ {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4980 */
+
+ {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5180 */
+ {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5185 */
+ {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5190 */
+ {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5195 */
+ {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5200 */
+ {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5205 */
+ {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5210 */
+ {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5215 */
+ {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5220 */
+ {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5225 */
+ {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5230 */
+ {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5235 */
+ {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5240 */
+ {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5245 */
+ {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5250 */
+ {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5255 */
+ {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5260 */
+ {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5265 */
+ {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5270 */
+ {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5275 */
+ {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5280 */
+ {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5285 */
+ {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5290 */
+ {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5295 */
+ {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5300 */
+ {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5305 */
+ {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5310 */
+ {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5315 */
+ {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5320 */
+
+ {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5500 */
+ {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5505 */
+ {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5510 */
+ {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5515 */
+ {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5520 */
+ {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5525 */
+ {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5530 */
+ {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5535 */
+ {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5540 */
+ {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5545 */
+ {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5550 */
+ {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5555 */
+ {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5560 */
+ {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5565 */
+ {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5570 */
+ {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5575 */
+ {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5580 */
+ {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5585 */
+ {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5590 */
+ {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5595 */
+ {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5600 */
+ {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5605 */
+ {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5610 */
+ {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5615 */
+ {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5620 */
+ {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5625 */
+ {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5630 */
+ {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5635 */
+ {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5640 */
+ {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5645 */
+ {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5650 */
+ {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5655 */
+ {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5660 */
+ {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5665 */
+ {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5670 */
+ {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5675 */
+ {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5680 */
+
+ {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5685 */
+ {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5690 */
+ {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5695 */
+ {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5700 */
+ {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5705 */
+ {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5710 */
+ {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5715 */
+ {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5720 */
+ {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5725 */
+ {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5730 */
+ {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5735 */
+ {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5740 */
+ {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5745 */
+ {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5750 */
+ {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5755 */
+ {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5760 */
+ {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5765 */
+ {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5770 */
+ {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5775 */
+ {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5780 */
+ {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5785 */
+ {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5790 */
+ {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5795 */
+ {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5800 */
+ {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5805 */
+ {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5810 */
+ {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5815 */
+ {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5820 */
+ {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5825 */
+ {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5830 */
+ {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5835 */
+ {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5840 */
+ {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5845 */
+ {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5850 */
+ {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5855 */
+ {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5860 */
+ {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5865 */
+};
+
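+/*
+ * Fractional (SDM) variant of the frequency plan. Judging by the table
+ * values, the integer divider column (0x28..0x30) and the 18-bit fractional
+ * column encode the frequency as (N + K / 2^18) * 60 MHz on 2.4 GHz and
+ * (N + K / 2^18) * 120 MHz on 5 GHz, e.g. channel 1:
+ * (0x28 + 0xCCCC / 2^18) * 60 MHz =~ 2412 MHz.
+ */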
+static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = {
+ {1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2412 */
+ {2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3}, /* Freq 2417 */
+ {3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3}, /* Freq 2422 */
+ {4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3}, /* Freq 2427 */
+ {5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3}, /* Freq 2432 */
+ {6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3}, /* Freq 2437 */
+ {7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3}, /* Freq 2442 */
+ {8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3}, /* Freq 2447 */
+ {9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3}, /* Freq 2452 */
+ {10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3}, /* Freq 2457 */
+ {11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2222, 0x3}, /* Freq 2462 */
+ {12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x7777, 0x3}, /* Freq 2467 */
+ {13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2472 */
+ {14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3}, /* Freq 2484 */
+
+ {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 4915 */
+ {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x0, 0x3}, /* Freq 4920 */
+ {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2AAA, 0x3}, /* Freq 4925 */
+ {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x8000, 0x3}, /* Freq 4935 */
+ {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 4940 */
+ {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 4945 */
+ {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 4960 */
+ {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 4980 */
+
+ {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 5180 */
+ {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 5185 */
+ {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5190 */
+ {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5195 */
+ {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5200 */
+ {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5205 */
+ {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5210 */
+ {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5215 */
+ {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5220 */
+ {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5225 */
+ {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5230 */
+ {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5235 */
+ {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5240 */
+ {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5245 */
+ {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5250 */
+ {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5255 */
+ {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5260 */
+ {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5265 */
+ {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5270 */
+ {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5275 */
+ {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5280 */
+ {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5285 */
+ {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5290 */
+ {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5295 */
+ {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5300 */
+ {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5305 */
+ {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5310 */
+ {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5315 */
+ {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5320 */
+
+ {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5500 */
+ {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5505 */
+ {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5510 */
+ {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5515 */
+ {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5520 */
+ {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5525 */
+ {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5530 */
+ {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5535 */
+ {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5540 */
+ {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5545 */
+ {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5550 */
+ {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5555 */
+ {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5560 */
+ {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5565 */
+ {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5570 */
+ {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5575 */
+ {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5580 */
+ {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5585 */
+ {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5590 */
+ {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5595 */
+ {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5600 */
+ {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5605 */
+ {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5610 */
+ {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5615 */
+ {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5620 */
+ {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5625 */
+ {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5630 */
+ {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5635 */
+ {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5640 */
+ {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5645 */
+ {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5650 */
+ {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5655 */
+ {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5660 */
+ {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5665 */
+ {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5670 */
+ {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5675 */
+ {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5680 */
+
+ {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5685 */
+ {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5690 */
+ {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5695 */
+ {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5700 */
+ {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5705 */
+ {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5710 */
+ {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5715 */
+ {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5720 */
+ {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5725 */
+ {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5730 */
+ {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5735 */
+ {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5740 */
+ {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5745 */
+ {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5750 */
+ {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5755 */
+ {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5760 */
+ {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5765 */
+ {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5770 */
+ {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5775 */
+ {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5780 */
+ {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5785 */
+ {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5790 */
+ {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5795 */
+ {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5800 */
+ {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5805 */
+ {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5810 */
+ {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5815 */
+ {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5820 */
+ {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5825 */
+ {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5830 */
+ {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5835 */
+ {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5840 */
+ {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5845 */
+ {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5850 */
+ {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5855 */
+ {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5860 */
+ {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5865 */
+};
+
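+/*
+ * Channels that are presumably tuned with the fractional (SDM) plan above
+ * rather than mt76x0_frequency_plan.
+ */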
+static const u8 mt76x0_sdm_channel[] = {
+ 183, 185, 43, 45, 54, 55, 57, 58, 102, 103, 105, 106, 115, 117, 126, 127, 129, 130, 139, 141, 150, 151, 153, 154, 163, 165
+};
+
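+/*
+ * Bank 6 overrides, keyed by 5 GHz sub-band, presumably applied only on
+ * designs with an external PA.
+ */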
+static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = {
+ { MT_RF(6, 45), RF_A_BAND_LB, 0x63},
+ { MT_RF(6, 45), RF_A_BAND_MB, 0x43},
+ { MT_RF(6, 45), RF_A_BAND_HB, 0x33},
+ { MT_RF(6, 45), RF_A_BAND_11J, 0x73},
+
+ { MT_RF(6, 50), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_MB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_11J, 0x02},
+
+ { MT_RF(6, 51), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_MB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_11J, 0x02},
+
+ { MT_RF(6, 52), RF_A_BAND_LB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_MB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_HB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_11J, 0x08},
+
+ { MT_RF(6, 53), RF_A_BAND_LB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_MB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_HB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_11J, 0x08},
+
+ { MT_RF(6, 54), RF_A_BAND_LB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_MB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_HB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_11J, 0x0A},
+
+ { MT_RF(6, 55), RF_A_BAND_LB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_MB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_HB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_11J, 0x0A},
+
+ { MT_RF(6, 56), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_MB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_HB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 57), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_MB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_HB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 58), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 58), RF_A_BAND_MB, 0x03},
+ { MT_RF(6, 58), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 58), RF_A_BAND_11J, 0x07},
+
+ { MT_RF(6, 59), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 59), RF_A_BAND_MB, 0x03},
+ { MT_RF(6, 59), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 59), RF_A_BAND_11J, 0x07},
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
new file mode 100644
index 000000000000..91a84be36d3b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
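+/*
+ * Convert the packed hardware rate value (PHY type, rate/MCS index,
+ * bandwidth and SGI bits) into mac80211's struct ieee80211_tx_rate.
+ */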
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
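+/*
+ * The hardware only reports the final rate and the total retry count, so
+ * rebuild an approximate per-rate chain for mac80211 by stepping the rate
+ * index down by one for each retry.
+ */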
+static void
+mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info,
+ struct mt76_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + last_rate;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+
+ rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
+
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->mt76.lock, flags);
+ wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_irqrestore(&dev->mt76.lock, flags);
+}
+
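+/* Pop a single TX status report from the two status FIFO registers. */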
+struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev)
+{
+ struct mt76_tx_status stat = {};
+ u32 stat2, stat1;
+
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ return stat;
+}
+
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ void *priv;
+ priv = msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+ }
+
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ spin_lock_bh(&dev->mac_lock);
+ ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
+ spin_unlock_bh(&dev->mac_lock);
+out:
+ rcu_read_unlock();
+}
+
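+/*
+ * Program the six consecutive protection configuration registers starting
+ * at MT_CCK_PROT_CFG (legacy and HT 20/40 MHz variants) from the HT
+ * operation mode reported by mac80211.
+ */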
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ bool ht_rts[4] = {};
+ int i;
+
+ prot[0] = MT_PROT_NAV_SHORT |
+ MT_PROT_TXOP_ALLOW_ALL |
+ MT_PROT_RTS_THR_EN;
+ prot[1] = prot[0];
+ if (legacy_prot)
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+ prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+ if (legacy_prot) {
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+ } else {
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ ht_rts[1] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+ }
+
+ if (non_gf)
+ ht_rts[2] = ht_rts[3] = true;
+
+ for (i = 0; i < 4; i++)
+ if (ht_rts[i])
+ prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+ for (i = 0; i < 6; i++)
+ mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
+{
+ if (short_preamb)
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+ else
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
+{
+ u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
+
+ val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ if (!enable) {
+ mt76_wr(dev, MT_BEACON_TIME_CFG, val);
+ return;
+ }
+
+ val &= ~MT_BEACON_TIME_CFG_INTVAL;
+ val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN;
+}
+
+static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
+{
+ u32 val = mt76_rr(dev, 0x10f4);
+
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+
+ dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n");
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+ udelay(10);
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
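+
+/*
+ * Periodic statistics work: accumulate the hardware RX/TX/aggregation
+ * counters into dev->stats and recompute the average A-MPDU length from
+ * the per-size aggregation histogram.
+ */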
+void mt76x0_mac_work(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ mac_work.work);
+ struct {
+ u32 addr_base;
+ u32 span;
+ u64 *stat_base;
+ } spans[] = {
+ { MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
+ { MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
+ { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
+ { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
+ { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
+ { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
+ };
+ u32 sum, n;
+ int i, j, k;
+
+ /* Note: using MCU_RANDOM_READ is actually slower than reading all the
+ * registers by hand. The MCU takes ca. 20ms to complete a read of 24
+ * registers, while reading them one by one takes roughly
+ * 24 * 200us =~ 5ms.
+ */
+
+ k = 0;
+ n = 0;
+ sum = 0;
+ for (i = 0; i < ARRAY_SIZE(spans); i++)
+ for (j = 0; j < spans[i].span; j++) {
+ u32 val = mt76_rr(dev, spans[i].addr_base + j * 4);
+
+ spans[i].stat_base[j * 2] += val & 0xffff;
+ spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+ /* Calculate average AMPDU length */
+ if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+ spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+ continue;
+
+ n += (val >> 16) + (val & 0xffff);
+ sum += (val & 0xffff) * (1 + k * 2) +
+ (val >> 16) * (2 + k * 2);
+ k++;
+ }
+
+ atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+ mt76x0_check_mac_err(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ u8 zmac[ETH_ALEN] = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ if (mac)
+ memcpy(zmac, mac, sizeof(zmac));
+
+ mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
+{
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ void *msta;
+ u8 min_factor = 3;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+ wcid = rcu_dereference(dev->wcid[i]);
+ if (!wcid)
+ continue;
+
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+ min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+ }
+ rcu_read_unlock();
+
+ mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+ FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
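+/*
+ * Decode the RXWI rate field into mac80211 RX status: encoding (legacy, HT,
+ * VHT), rate/MCS index, bandwidth and SGI/LDPC/STBC flags.
+ */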
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void
+mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+ u16 rate, int rssi)
+{
+ dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
+ dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
+}
+
+static int
+mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt76x0_rxwi *rxwi = rxi;
+ u32 len, ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ int rssi;
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ if (WARN_ON(len < 10))
+ return 0;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+ }
+
+ status->chains = BIT(0);
+ rssi = mt76x0_phy_get_rssi(dev, rxwi);
+ status->chain_signal[0] = status->signal = rssi;
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ mt76_mac_process_rate(status, rate);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ if (mt76x0_rx_is_our_beacon(dev, data)) {
+ mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
+ } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) {
+ if (dev->avg_rssi == 0)
+ dev->avg_rssi = rssi;
+ else
+ dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
+
+ }
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ return len;
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_mt76x0_set_key(&dev->mt76, idx);
+
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP) {
+ /* Note: start with 1 to comply with spec,
+ * (see comment on common/cmm_wpa.c:4291).
+ */
+ iv_data[0] |= 1;
+ iv_data[3] |= 0x20;
+ }
+ }
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
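+ /* The cipher id is split between PKEY_MODE (low three bits) and
+ * PKEY_MODE_EXT (the remaining bit) of the WCID attribute word.
+ */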
+ val = mt76_rr(dev, MT_WCID_ATTR(idx));
+ val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+ val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+ FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+ val &= ~MT_WCID_ATTR_PAIRWISE;
+ val |= MT_WCID_ATTR_PAIRWISE *
+ !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ mt76_wr(dev, MT_WCID_ATTR(idx), val);
+
+ return 0;
+}
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+ key_data, sizeof(key_data));
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
new file mode 100644
index 000000000000..bea067b71c13
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+/* Note: on MT76X0U the fields originally named "RSSI" and "SNR" do not hold
+ * what those names suggest; the names used by this driver are educated
+ * guesses (see vendor mac/ral_omac.c).
+ */
+struct mt76x0_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 tid_sn;
+ __le16 rate;
+
+ s8 rssi[4];
+
+ __le32 bbp_rxinfo[4];
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_U2M BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211 BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
+#define MT_RXWI_CTL_TID GENMASK(31, 28)
+
+#define MT_RXWI_FRAG GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC BIT(6)
+#define MT_RXWI_RATE_BW GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI BIT(9)
+#define MT_RXWI_RATE_STBC BIT(10)
+#define MT_RXWI_RATE_LDPC_ETXBF BIT(11)
+#define MT_RXWI_RATE_SND BIT(12)
+#define MT_RXWI_RATE_PHY GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
+
+#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
+
+enum mt76_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+};
+
+struct mt76_txwi {
+ __le16 flags;
+ __le16 rate_ctl;
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+ __le32 iv;
+ __le32 eiv;
+ u8 aid;
+ u8 txstream;
+ u8 ctl2;
+ u8 pktid;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_RATE_MCS GENMASK(6, 0)
+#define MT_TXWI_RATE_BW BIT(7)
+#define MT_TXWI_RATE_SGI BIT(8)
+#define MT_TXWI_RATE_STBC GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
+#define MT_TXWI_CTL_PIFS_REV BIT(6)
+
+#define MT_TXWI_PKTID_PROBE BIT(7)
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi);
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key);
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev);
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
new file mode 100644
index 000000000000..cf6ffb1ba4a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+
+static int mt76x0_start(struct ieee80211_hw *hw)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x0_mac_start(dev);
+ if (ret)
+ goto out;
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt76x0_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt76x0_mac_stop(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+
+static int mt76x0_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int idx;
+
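+ /* ffs() is 1-based, so 0 means no free slot; the hardware
+ * supports up to eight BSS indices.
+ */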
+ idx = ffs(~dev->vif_mask);
+ if (!idx || idx > 8)
+ return -ENOSPC;
+
+ idx--;
+ dev->vif_mask |= BIT(idx);
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = GROUP_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+
+ return 0;
+}
+
+static void mt76x0_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+
+ dev->vif_mask &= ~BIT(mvif->idx);
+}
+
+static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ u32 flags = 0;
+
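+ /* Bits in MT_RX_FILTR_CFG drop matching frames, so a bit gets set
+ * only when the corresponding FIF_* flag was not requested.
+ */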
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt76x0_phy_con_cal_onoff(dev, info);
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+ /* Note: this is a hack because beacon_int is not changed
+ * on leave nor is any more appropriate event generated.
+ * rt2x00 doesn't seem to be bothered though.
+ */
+ if (is_zero_ether_addr(info->bssid))
+ mt76x0_mac_config_tsf(dev, false, 0);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+ mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+ mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+ mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+ mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT)
+ mt76x0_mac_config_tsf(dev, true, info->beacon_int);
+
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt76x0_mac_set_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE)
+ mt76x0_mac_set_short_preamble(dev, info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt76x0_phy_recalibrate_after_assoc(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+ mt76x0_mac_set_ampdu_factor(dev);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static int
+mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+ mt76x0_mac_wcid_setup(dev, idx, 0, NULL);
+ mt76x0_mac_set_ampdu_factor(dev);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static void
+mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt76x0_sw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x0_agc_save(dev);
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mt76x0_agc_restore(dev);
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+static int
+mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+ struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+
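+ /* No station means a group/default key: it lives in the per-vif
+ * shared key slots, and the vif group wcid is updated as well when
+ * the key is set or is the currently active index.
+ */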
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+ return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+ WARN_ON(msta->wcid.idx >= N_WCIDS);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ break;
+ case IEEE80211_AMPDU_TX_START:
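+ /* Keep the SSN in sequence-control format (sequence number in
+ * bits 4..15), which is how it is later passed to
+ * ieee80211_send_bar() in the TX_OPERATIONAL case.
+ */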
+ msta->agg_ssn[tid] = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates;
+ struct ieee80211_tx_rate rate = {};
+
+ rcu_read_lock();
+ rates = rcu_dereference(sta->rates);
+
+ if (!rates)
+ goto out;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+ rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt76x0_ops = {
+ .tx = mt76x0_tx,
+ .start = mt76x0_start,
+ .stop = mt76x0_stop,
+ .add_interface = mt76x0_add_interface,
+ .remove_interface = mt76x0_remove_interface,
+ .config = mt76x0_config,
+ .configure_filter = mt76_configure_filter,
+ .bss_info_changed = mt76x0_bss_info_changed,
+ .sta_add = mt76x0_sta_add,
+ .sta_remove = mt76x0_sta_remove,
+ .sta_notify = mt76x0_sta_notify,
+ .set_key = mt76x0_set_key,
+ .conf_tx = mt76x0_conf_tx,
+ .sw_scan_start = mt76x0_sw_scan,
+ .sw_scan_complete = mt76x0_sw_scan_complete,
+ .ampdu_action = mt76_ampdu_action,
+ .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+ .set_rts_threshold = mt76x0_set_rts_threshold,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
new file mode 100644
index 000000000000..8affacbab90a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
@@ -0,0 +1,656 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x38f8
+#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE 1024
+
+static inline int firmware_running(struct mt76x0_dev *dev)
+{
+ return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+ put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb,
+ u8 seq, enum mcu_cmd cmd)
+{
+ WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+ FIELD_PREP(MT_TXD_CMD_SEQ, seq) |
+ FIELD_PREP(MT_TXD_CMD_TYPE, cmd)));
+}
+
+static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev,
+ struct sk_buff *skb, bool need_resp)
+{
+ u32 i, csum = 0;
+
+ for (i = 0; i < skb->len / 4; i++)
+ csum ^= get_unaligned_le32(skb->data + i * 4);
+
+ trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *
+mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (skb) {
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ memcpy(skb_put(skb, len), data, len);
+ }
+ return skb;
+}
+
+static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len)
+{
+ int i;
+ int n = dev->mcu.reg_pairs_len;
+ u8 *buf = dev->mcu.resp.buf;
+
+ buf += 4;
+ len -= 8;
+
+ if (dev->mcu.burst_read) {
+ u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base;
+
+ WARN_ON_ONCE(len/4 != n);
+ for (i = 0; i < n; i++) {
+ u32 val = get_unaligned_le32(buf + 4*i);
+
+ dev->mcu.reg_pairs[i].reg = reg++;
+ dev->mcu.reg_pairs[i].value = val;
+ }
+ } else {
+ WARN_ON_ONCE(len/8 != n);
+ for (i = 0; i < n; i++) {
+ u32 reg = get_unaligned_le32(buf + 8*i) - dev->mcu.reg_base;
+ u32 val = get_unaligned_le32(buf + 8*i + 4);
+
+ WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg);
+ dev->mcu.reg_pairs[i].value = val;
+ }
+ }
+}
+
+static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq)
+{
+ struct urb *urb = dev->mcu.resp.urb;
+ u32 rxfce;
+ int urb_status, ret, try = 5;
+
+ while (try--) {
+ if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+ msecs_to_jiffies(300))) {
+ dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__);
+ continue;
+ }
+
+ /* Make copies of important data before reusing the urb */
+ rxfce = get_unaligned_le32(dev->mcu.resp.buf);
+ urb_status = urb->status * mt76x0_urb_has_error(urb);
+
+ if (urb_status == 0 && dev->mcu.reg_pairs)
+ mt76x0_read_resp_regs(dev, urb->actual_length);
+
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt76x0_complete_urb,
+ &dev->mcu.resp_cmpl);
+ if (ret)
+ return ret;
+
+ if (urb_status)
+ dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n",
+ urb_status);
+
+ if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+ return 0;
+
+ dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static int
+__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
+ dev->out_ep[MT_EP_OUT_INBAND_CMD]);
+ int sent, ret;
+ u8 seq = 0;
+
+ if (wait_resp)
+ while (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ mt76x0_dma_skb_wrap_cmd(skb, seq, cmd);
+
+ if (dev->mcu.resp_cmpl.done)
+ dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n");
+
+ trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp);
+ trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len);
+
+ ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret);
+ goto out;
+ }
+ if (sent != skb->len)
+ dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__);
+
+ if (wait_resp)
+ ret = mt76x0_mcu_wait_resp(dev, seq);
+
+out:
+ return ret;
+}
+
+static int
+mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ int ret;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return 0;
+
+ mutex_lock(&dev->mcu.mutex);
+ ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp);
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
+ enum mcu_function func, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
+}
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(cal),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
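+ /* Only the final chunk (cnt == n) waits for the MCU response;
+ * the remaining pairs are sent by the tail recursion below.
+ */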
+ ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ mutex_lock(&dev->mcu.mutex);
+
+ dev->mcu.reg_pairs = data;
+ dev->mcu.reg_pairs_len = n;
+ dev->mcu.reg_base = base;
+ dev->mcu.burst_read = false;
+
+ ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true);
+
+ dev->mcu.reg_pairs = NULL;
+
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+ const u32 *data, int n)
+{
+ const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_regs_per_cmd, n);
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+ for (i = 0; i < cnt; i++)
+ skb_put_le32(skb, data[i]);
+
+ ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt76x0_burst_write_regs(dev, offset + cnt * 4,
+ data + cnt, n - cnt);
+}
+
+#if 0
+static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, base + data[0].reg);
+ skb_put_le32(skb, n);
+
+ mutex_lock(&dev->mcu.mutex);
+
+ dev->mcu.reg_pairs = data;
+ dev->mcu.reg_pairs_len = n;
+ dev->mcu.reg_base = base;
+ dev->mcu.burst_read = true;
+
+ ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true);
+
+ dev->mcu.reg_pairs = NULL;
+
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+#endif
+
+struct mt76_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76_fw {
+ struct mt76_fw_header hdr;
+ u8 ivb[MT_MCU_IVB_SIZE];
+ u8 ilm[];
+};
+
+static int __mt76x0_dma_fw(struct mt76x0_dev *dev,
+ const struct mt76x0_dma_buf *dma_buf,
+ const void *data, u32 len, u32 dst_addr)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */
+ __le32 reg;
+ u32 val;
+ int ret;
+
+ reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_TXD_INFO_LEN, len));
+ memcpy(buf.buf, &reg, sizeof(reg));
+ memcpy(buf.buf + sizeof(reg), data, len);
+ memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+ ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ if (ret)
+ return ret;
+ len = roundup(len, 4);
+ ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+ if (ret)
+ return ret;
+
+ buf.len = MT_DMA_HDR_LEN + len + 4;
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+ &buf, GFP_KERNEL,
+ mt76x0_complete_urb, &cmpl);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+ dev_err(dev->mt76.dev, "Error: firmware upload timed out\n");
+ usb_kill_urb(buf.urb);
+ return -ETIMEDOUT;
+ }
+ if (mt76x0_urb_has_error(buf.urb)) {
+ dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n",
+ buf.urb->status);
+ return buf.urb->status;
+ }
+
+ val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ msleep(5);
+
+ return 0;
+}
+
+static int
+mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf,
+ const void *data, int len, u32 dst_addr)
+{
+ int n, ret;
+
+ if (len == 0)
+ return 0;
+
+ n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+ ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr);
+ if (ret)
+ return ret;
+
+#if 0
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+ return -ETIMEDOUT;
+#endif
+
+ return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw)
+{
+ struct mt76x0_dma_buf dma_buf;
+ void *ivb;
+ u32 ilm_len, dlm_len;
+ int i, ret;
+
+ ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+ if (!ivb)
+ return -ENOMEM;
+ if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
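+ /* The first MT_MCU_IVB_SIZE bytes of the ILM are the interrupt
+ * vector block, which is sent separately via a vendor request
+ * after the rest of the ILM and the DLM have been transferred.
+ */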
+ ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+ dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n",
+ ilm_len, sizeof(fw->ivb));
+ ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+ if (ret)
+ goto error;
+
+ dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+ dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+ ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+ dlm_len, MT_MCU_DLM_OFFSET);
+ if (ret)
+ goto error;
+
+ ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ 0x12, 0, ivb, sizeof(fw->ivb));
+ if (ret < 0)
+ goto error;
+ ret = 0;
+
+ for (i = 100; i && !firmware_running(dev); i--)
+ msleep(10);
+ if (!i) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ dev_dbg(dev->mt76.dev, "Firmware running!\n");
+error:
+ kfree(ivb);
+ mt76x0_usb_free_buf(dev, &dma_buf);
+
+ return ret;
+}
+
+static int mt76x0_load_firmware(struct mt76x0_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76_fw_header *hdr;
+ int len, ret;
+ u32 val;
+
+ mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+
+ if (firmware_running(dev))
+ return 0;
+
+ ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto err_inv_fw;
+
+ hdr = (const struct mt76_fw_header *) fw->data;
+
+ if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+ goto err_inv_fw;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto err_inv_fw;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_dbg(dev->mt76.dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt76_wr(dev, 0x1004, 0x2c);
+
+ mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
+ mt76x0_vendor_reset(dev);
+ msleep(5);
+/*
+ mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+ MT_PBF_CFG_TX1Q_EN |
+ MT_PBF_CFG_TX2Q_EN |
+ MT_PBF_CFG_TX3Q_EN));
+*/
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 3);
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+ val |= MT_USB_DMA_CFG_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+ release_firmware(fw);
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ return ret;
+
+err_inv_fw:
+ dev_err(dev->mt76.dev, "Invalid firmware image\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev)
+{
+ int ret;
+
+ mutex_init(&dev->mcu.mutex);
+
+ ret = mt76x0_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+ return 0;
+}
+
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
+{
+ int ret;
+
+ ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1);
+ if (ret)
+ return ret;
+
+ init_completion(&dev->mcu.resp_cmpl);
+ if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+ return -ENOMEM;
+ }
+
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt76x0_complete_urb, &dev->mcu.resp_cmpl);
+ if (ret) {
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+ return ret;
+ }
+
+ return 0;
+}
+
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev)
+{
+ usb_kill_urb(dev->mcu.resp.urb);
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
new file mode 100644
index 000000000000..8c2f77f4c3f5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_MCU_H
+#define __MT76X0U_MCU_H
+
+struct mt76x0_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+
+#define MT_MCU_IVB_SIZE 0x40
+#define MT_MCU_DLM_OFFSET 0x80000
+
+#define MT_MCU_MEMMAP_WLAN 0x00410000
+/* We use the same space for BBP as for MAC regs
+ * #define MT_MCU_MEMMAP_BBP 0x40000000
+ */
+#define MT_MCU_MEMMAP_RF 0x80000000
+
+#define INBAND_PACKET_MAX_LEN 192
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ BW_SETTING = 2,
+ ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+ MCU_CAL_R = 1,
+ MCU_CAL_RXDCOC,
+ MCU_CAL_LC,
+ MCU_CAL_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_BW,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQ,
+ MCU_CAL_TXDCOC,
+ MCU_CAL_RX_GROUP_DELAY,
+ MCU_CAL_TX_GROUP_DELAY,
+};
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev);
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev);
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev);
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
+
+int
+mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
new file mode 100644
index 000000000000..fc9857f61771
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MT76X0U_H
+#define MT76X0U_H
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT_CALIBRATE_INTERVAL (4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
+
+#define MT_BBP_REG_VERSION 0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
+#define MT_RX_ORDER 3
+#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
+
+struct mt76x0_dma_buf {
+ struct urb *urb;
+ void *buf;
+ dma_addr_t dma;
+ size_t len;
+};
+
+struct mt76x0_mcu {
+ struct mutex mutex;
+
+ u8 msg_seq;
+
+ struct mt76x0_dma_buf resp;
+ struct completion resp_cmpl;
+
+ struct mt76_reg_pair *reg_pairs;
+ unsigned int reg_pairs_len;
+ u32 reg_base;
+ bool burst_read;
+};
+
+struct mac_stats {
+ u64 rx_stat[6];
+ u64 tx_stat[6];
+ u64 aggr_stat[2];
+ u64 aggr_n[32];
+ u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES 16
+struct mt76x0_rx_queue {
+ struct mt76x0_dev *dev;
+
+ struct mt76x0_dma_buf_rx {
+ struct urb *urb;
+ struct page *p;
+ } e[N_RX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int pending;
+};
+
+#define N_TX_ENTRIES 64
+
+struct mt76x0_tx_queue {
+ struct mt76x0_dev *dev;
+
+ struct mt76x0_dma_buf_tx {
+ struct urb *urb;
+ struct sk_buff *skb;
+ } e[N_TX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int used;
+ unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ * 0: mcast wcid
+ * 1: bssid wcid
+ * 1...: STAs
+ * ...7e: group wcids
+ * 7f: reserved
+ */
+#define N_WCIDS 128
+#define GROUP_WCID(idx) (254 - (idx))
+
+struct mt76x0_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE 39
+#define MT_FREQ_OFFSET_INVALID -128
+
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+
+enum mt_bw {
+ MT_BW_20,
+ MT_BW_40,
+};
+
+/**
+ * struct mt76x0_dev - adapter structure
+ * @lock: protects @wcid->tx_rate.
+ * @mac_lock: locks out mac80211's tx status and rx paths.
+ * @tx_lock: protects @tx_q and changes of MT76_STATE_*_STATS
+ * flags in @state.
+ * @rx_lock: protects @rx_q.
+ * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex: ensures exclusive access from mac80211 callbacks.
+ * @reg_atomic_mutex: ensures atomicity of indirect register accesses
+ * (accesses to RF and BBP).
+ * @hw_atomic_mutex: ensures exclusive access to HW during critical
+ * operations (power management, channel switch).
+ */
+struct mt76x0_dev {
+ struct mt76_dev mt76; /* must be first */
+
+ struct mutex mutex;
+
+ struct mutex usb_ctrl_mtx;
+ u8 data[32];
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u16 out_max_packet;
+ u8 in_ep[__MT_EP_IN_MAX];
+ u16 in_max_packet;
+
+ unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
+ unsigned long vif_mask;
+
+ struct mt76x0_mcu mcu;
+
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ struct workqueue_struct *stat_wq;
+ struct delayed_work stat_work;
+
+ struct mt76_wcid *mon_wcid;
+ struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+ spinlock_t mac_lock;
+
+ const u16 *beacon_offsets;
+
+ u8 macaddr[ETH_ALEN];
+ struct mt76x0_eeprom_params *ee;
+
+ struct mutex reg_atomic_mutex;
+ struct mutex hw_atomic_mutex;
+
+ u32 rxfilter;
+ u32 debugfs_reg;
+
+ /* TX */
+ spinlock_t tx_lock;
+ struct mt76x0_tx_queue *tx_q;
+ struct sk_buff_head tx_skb_done;
+
+ atomic_t avg_ampdu_len;
+
+ /* RX */
+ spinlock_t rx_lock;
+ struct mt76x0_rx_queue rx_q;
+
+ /* Connection monitoring things */
+ spinlock_t con_mon_lock;
+ u8 ap_bssid[ETH_ALEN];
+
+ s8 bcn_freq_off;
+ u8 bcn_phy_mode;
+
+ int avg_rssi; /* starts at 0 and converges */
+
+ u8 agc_save;
+ u16 chainmask;
+
+ struct mac_stats stats;
+};
+
+struct mt76x0_wcid {
+ u8 idx;
+ u8 hw_key_idx;
+
+ u16 tx_rate;
+ bool tx_rate_set;
+ u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 is_probe:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+struct mt76_sta {
+ struct mt76_wcid wcid;
+ struct mt76_tx_status status;
+ int n_frames;
+ u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+struct mt76x0_rxwi;
+
+extern const struct ieee80211_ops mt76x0_ops;
+
+static inline bool is_mt7610e(struct mt76x0_dev *dev)
+{
+ /* TODO */
+ return false;
+}
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev);
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len);
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int len);
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+ const u32 *data, int n);
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
+int mt76x0_init_hardware(struct mt76x0_dev *dev);
+int mt76x0_register_device(struct mt76x0_dev *dev);
+void mt76x0_cleanup(struct mt76x0_dev *dev);
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
+
+int mt76x0_mac_start(struct mt76x0_dev *dev);
+void mt76x0_mac_stop(struct mt76x0_dev *dev);
+
+/* PHY */
+void mt76x0_phy_init(struct mt76x0_dev *dev);
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
+void mt76x0_agc_save(struct mt76x0_dev *dev);
+void mt76x0_agc_restore(struct mt76x0_dev *dev);
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+ struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt76x0_mac_work(struct work_struct *work);
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
+
+/* TX */
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
+void mt76x0_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76x0_remove_hdr_pad(struct sk_buff *skb);
+int mt76x0_insert_hdr_pad(struct sk_buff *skb);
+
+int mt76x0_dma_init(struct mt76x0_dev *dev);
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev);
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
new file mode 100644
index 000000000000..5da7bfbe907f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -0,0 +1,1008 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "phy.h"
+#include "initvals.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static int
+mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
+{
+ int ret = 0;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+ if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
+ trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
+static int
+mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
+{
+ int ret = -ETIMEDOUT;
+ u32 val;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+ if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_KICK);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ val = mt76_rr(dev, MT_RF_CSR_CFG);
+ if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg &&
+ FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
+ trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
+static int
+rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ .value = val,
+ };
+
+ return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ } else {
+ WARN_ON_ONCE(1);
+ return mt76x0_rf_csr_wr(dev, offset, val);
+ }
+}
+
+static int
+rf_rr(struct mt76x0_dev *dev, u32 offset)
+{
+ int ret;
+ u32 val;
+
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ };
+
+ ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ val = pair.value;
+ } else {
+ WARN_ON_ONCE(1);
+ ret = val = mt76x0_rf_csr_rr(dev, offset);
+ }
+
+ return (ret < 0) ? ret : val;
+}
+
+static int
+rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = rf_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ ret = rf_wr(dev, offset, val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int
+rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+ return rf_rmw(dev, offset, 0, val);
+}
+
+#if 0
+static int
+rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
+{
+ return rf_rmw(dev, offset, mask, 0);
+}
+#endif
+
+#define RF_RANDOM_WRITE(dev, tab) \
+ mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab))
+
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
+{
+ int i = 20;
+ u32 val;
+
+ do {
+ val = mt76_rr(dev, MT_BBP(CORE, 0));
+ printk("BBP version %08x\n", val);
+ if (val && ~val)
+ break;
+ } while (--i);
+
+ if (!i) {
+ dev_err(dev->mt76.dev, "Error: BBP is not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void
+mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
+ u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi)
+{
+ s8 lna_gain, rssi_offset;
+ int val;
+
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) {
+ lna_gain = dev->ee->lna_gain_2ghz;
+ rssi_offset = dev->ee->rssi_offset_2ghz[0];
+ } else {
+ lna_gain = dev->ee->lna_gain_5ghz[0];
+ rssi_offset = dev->ee->rssi_offset_5ghz[0];
+ }
+
+ val = rxwi->rssi[0] + rssi_offset - lna_gain;
+
+ return val;
+}
+
+static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
+{
+ u8 val;
+
+ val = rf_rr(dev, MT_RF(0, 4));
+ if ((val & 0x70) != 0x30)
+ return;
+
+ /*
+ * Calibration Mode - Open loop, closed loop, and amplitude:
+ * B0.R06.[0]: 1
+ * B0.R06.[3:1] bp_close_code: 100
+ * B0.R05.[7:0] bp_open_code: 0x0
+ * B0.R04.[2:0] cal_bits: 000
+ * B0.R03.[2:0] startup_time: 011
+ * B0.R03.[6:4] settle_time:
+ * 80MHz channel: 110
+ * 40MHz channel: 101
+ * 20MHz channel: 100
+ */
+ val = rf_rr(dev, MT_RF(0, 6));
+ val &= ~0xf;
+ val |= 0x09;
+ rf_wr(dev, MT_RF(0, 6), val);
+
+ val = rf_rr(dev, MT_RF(0, 5));
+ if (val != 0)
+ rf_wr(dev, MT_RF(0, 5), 0x0);
+
+ val = rf_rr(dev, MT_RF(0, 4));
+ val &= ~0x07;
+ rf_wr(dev, MT_RF(0, 4), val);
+
+ val = rf_rr(dev, MT_RF(0, 3));
+ val &= ~0x77;
+ if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
+ val |= 0x63;
+ } else if (channel == 3 || channel == 4 || channel == 10) {
+ val |= 0x53;
+ } else if (channel == 2 || channel == 5 || channel == 6 ||
+ channel == 8 || channel == 11 || channel == 12) {
+ val |= 0x43;
+ } else {
+ WARN(1, "Unknown channel %u\n", channel);
+ return;
+ }
+ rf_wr(dev, MT_RF(0, 3), val);
+
+ /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
+ val = rf_rr(dev, MT_RF(0, 4));
+ val = ((val & ~(0x80)) | 0x80);
+ rf_wr(dev, MT_RF(0, 4), val);
+
+ msleep(2);
+}
+
+static void
+mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
+{
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+
+static void
+mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+
+ rf_wr(dev, MT_RF(5, 0), 0x45);
+ rf_wr(dev, MT_RF(6, 0), 0x44);
+
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
+ break;
+ case NL80211_BAND_5GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+
+ rf_wr(dev, MT_RF(5, 0), 0x44);
+ rf_wr(dev, MT_RF(6, 0), 0x45);
+
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
+ break;
+ default:
+ break;
+ }
+}
+
+#define EXT_PA_2G_5G 0x0
+#define EXT_PA_5G_ONLY 0x1
+#define EXT_PA_2G_ONLY 0x2
+#define INT_PA_2G_5G 0x3
+
+static void
+mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+ u16 rf_band = rf_bw_band & 0xff00;
+ u16 rf_bw = rf_bw_band & 0x00ff;
+ u32 mac_reg;
+ u8 rf_val;
+ int i;
+ bool bSDM = false;
+ const struct mt76x0_freq_item *freq_item;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) {
+ if (channel == mt76x0_sdm_channel[i]) {
+ bSDM = true;
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_frequency_plan); i++) {
+ if (channel == mt76x0_frequency_plan[i].channel) {
+ rf_band = mt76x0_frequency_plan[i].band;
+
+ if (bSDM)
+ freq_item = &(mt76x0_sdm_frequency_plan[i]);
+ else
+ freq_item = &(mt76x0_frequency_plan[i]);
+
+ rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
+ rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
+ rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
+ rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
+ rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);
+
+ rf_val = rf_rr(dev, MT_RF(0, 32));
+ rf_val &= ~0xE0;
+ rf_val |= freq_item->pllR32_b7b5;
+ rf_wr(dev, MT_RF(0, 32), rf_val);
+
+ /* R32<4:0> pll_den: (Denominator - 8) */
+ rf_val = rf_rr(dev, MT_RF(0, 32));
+ rf_val &= ~0x1F;
+ rf_val |= freq_item->pllR32_b4b0;
+ rf_wr(dev, MT_RF(0, 32), rf_val);
+
+ /* R31<7:5> */
+ rf_val = rf_rr(dev, MT_RF(0, 31));
+ rf_val &= ~0xE0;
+ rf_val |= freq_item->pllR31_b7b5;
+ rf_wr(dev, MT_RF(0, 31), rf_val);
+
+ /* R31<4:0> pll_k (Numerator) */
+ rf_val = rf_rr(dev, MT_RF(0, 31));
+ rf_val &= ~0x1F;
+ rf_val |= freq_item->pllR31_b4b0;
+ rf_wr(dev, MT_RF(0, 31), rf_val);
+
+ /* R30<7> sdm_reset_n */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x80;
+ if (bSDM) {
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ rf_val |= 0x80;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ } else {
+ rf_val |= freq_item->pllR30_b7;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ }
+
+ /* R30<6:2> sdmmash_prbs,sin */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x7C;
+ rf_val |= freq_item->pllR30_b6b2;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R30<1> sdm_bp */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x02;
+ rf_val |= (freq_item->pllR30_b1 << 1);
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R30<0> R29<7:0> (hex) pll_n */
+ rf_val = freq_item->pll_n & 0x00FF;
+ rf_wr(dev, MT_RF(0, 29), rf_val);
+
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x1;
+ rf_val |= ((freq_item->pll_n >> 8) & 0x0001);
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R28<7:6> isi_iso */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0xC0;
+ rf_val |= freq_item->pllR28_b7b6;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<5:4> pfd_dly */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x30;
+ rf_val |= freq_item->pllR28_b5b4;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<3:2> clksel option */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x0C;
+ rf_val |= freq_item->pllR28_b3b2;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */
+ rf_val = freq_item->pll_sdm_k & 0x000000FF;
+ rf_wr(dev, MT_RF(0, 26), rf_val);
+
+ rf_val = ((freq_item->pll_sdm_k >> 8) & 0x000000FF);
+ rf_wr(dev, MT_RF(0, 27), rf_val);
+
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x3;
+ rf_val |= ((freq_item->pll_sdm_k >> 16) & 0x0003);
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R24<1:0> xo_div */
+ rf_val = rf_rr(dev, MT_RF(0, 24));
+ rf_val &= ~0x3;
+ rf_val |= freq_item->pllR24_b1b0;
+ rf_wr(dev, MT_RF(0, 24), rf_val);
+
+ break;
+ }
+ }
+
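+ /* Apply the bandwidth-dependent RF settings; some entries are further restricted to a band. */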
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) {
+ rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ } else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) &&
+ (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) {
+ rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) {
+ rf_wr(dev, mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg &= ~0xC; /* Clear 0x518[3:2] */
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+
+ if (dev->ee->pa_type == INT_PA_2G_5G ||
+ (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) ||
+ (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) {
+ ; /* Internal PA - nothing to do. */
+ } else {
+ /*
+ * MT_RF_MISC (offset: 0x0518)
+ * [2] 1'b1: enable external A band PA, 1'b0: disable external A band PA
+ * [3] 1'b1: enable external G band PA, 1'b0: disable external G band PA
+ */
+ if (rf_band & RF_A_BAND) {
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg |= 0x4;
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+ } else {
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg |= 0x8;
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+ }
+
+ /* External PA */
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++)
+ if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band)
+ rf_wr(dev, mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
+ mt76x0_rf_ext_pa_tab[i].value);
+ }
+
+ if (rf_band & RF_G_BAND) {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x63707400);
+ /* Set Atten mode = 2 For G band, Disable Tx Inc dcoc. */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x896400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800);
+ /* Set Atten mode = 0 For Ext A band, Disable Tx Inc dcoc Cal. */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x890400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ }
+}
+
+static void
+mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
+ if ((rf_bw_band & item->bw_band) != rf_bw_band)
+ continue;
+
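+ /* For the AGC gain register, back the default gain off by twice the per-band LNA gain from the EEPROM. */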
+ if (pair->reg == MT_BBP(AGC, 8)) {
+ u32 val = pair->value;
+ u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
+
+ if (channel > 14) {
+ if (channel < 100)
+ gain -= dev->ee->lna_gain_5ghz[0]*2;
+ else if (channel < 137)
+ gain -= dev->ee->lna_gain_5ghz[1]*2;
+ else
+ gain -= dev->ee->lna_gain_5ghz[2]*2;
+
+ } else {
+ gain -= dev->ee->lna_gain_2ghz*2;
+ }
+
+ val &= ~MT_BBP_AGC_GAIN;
+ val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
+ mt76_wr(dev, pair->reg, val);
+ } else {
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+ }
+}
+
+#if 0
+static void
+mt76x0_extra_power_over_mac(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8);
+ val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_7, val);
+
+ /* TODO: fix VHT */
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_8, val);
+
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band)
+{
+ u32 val;
+ int i;
+ int bw = (rf_bw_band & RF_BW_20) ? 0 : 1;
+
+ for (i = 0; i < 4; i++) {
+ if (channel <= 14)
+ val = dev->ee->tx_pwr_cfg_2g[i][bw];
+ else
+ val = dev->ee->tx_pwr_cfg_5g[i][bw];
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val);
+ }
+
+ mt76x0_extra_power_over_mac(dev);
+}
+#endif
+
+static void
+mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
+{
+ enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4 };
+ int bw;
+
+ switch (width) {
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ bw = BW_20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bw = BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ bw = BW_10;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_5:
+ /* TODO: return an error for unsupported bandwidths */
+ return;
+ }
+
+ mt76x0_mcu_function_select(dev, BW_SETTING, bw);
+}
+
+static void
+mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel)
+{
+ static const int mt76x0_tx_pwr_ch_list[] = {
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,
+ 36,38,40,44,46,48,52,54,56,60,62,64,
+ 100,102,104,108,110,112,116,118,120,124,126,128,132,134,136,140,
+ 149,151,153,157,159,161,165,167,169,171,173,
+ 42,58,106,122,155
+ };
+ int i;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++)
+ if (mt76x0_tx_pwr_ch_list[i] == channel)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list)))
+ return;
+
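+ /* Program the per-channel target power into the CH_INIT fields and force both LIMIT fields to 0x2f. */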
+ val = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ val &= ~0x3f3f;
+ val |= dev->ee->tx_pwr_per_chan[i];
+ val |= 0x2f2f << 16;
+ mt76_wr(dev, MT_TX_ALC_CFG_0, val);
+}
+
+static int
+__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
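+ /* Extended CCA configuration for each possible position of the primary 20 MHz channel within a 40/80 MHz channel. */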
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ int ch_group_index, freq, freq1;
+ u8 channel;
+ u32 val;
+ u16 rf_bw_band;
+
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+ channel = chandef->chan->hw_value;
+ rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ if (freq1 > freq)
+ ch_group_index = 0;
+ else
+ ch_group_index = 1;
+ channel += 2 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ channel += 6 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_80;
+ break;
+ default:
+ ch_group_index = 0;
+ rf_bw_band |= RF_BW_20;
+ break;
+ }
+
+ mt76x0_bbp_set_bw(dev, chandef->width);
+ mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
+ mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ mt76x0_phy_set_band(dev, chandef->chan->band);
+ mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
+
+ /* set Japan Tx filter at channel 14 */
+ val = mt76_rr(dev, MT_BBP(CORE, 1));
+ if (channel == 14)
+ val |= 0x20;
+ else
+ val &= ~0x20;
+ mt76_wr(dev, MT_BBP(CORE, 1), val);
+
+ mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);
+
+ /* The vendor driver does not do this: */
+ /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
+
+ if (scan)
+ mt76x0_vco_cal(dev, channel);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+ mt76x0_phy_set_chan_pwr(dev, channel);
+
+ dev->mt76.chandef = *chandef;
+ return 0;
+}
+
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+ ret = __mt76x0_phy_set_channel(dev, chandef);
+ mutex_unlock(&dev->hw_atomic_mutex);
+
+ return ret;
+}
+
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
+{
+ u32 tx_alc, reg_val;
+ u8 channel = dev->mt76.chandef.chan->hw_value;
+ int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0);
+
+ mt76x0_vco_cal(dev, channel);
+
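+ /* Temporarily disable TX ALC while the calibrations run; restored below. */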
+ tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+ usleep_range(500, 700);
+
+ reg_val = mt76_rr(dev, 0x2124);
+ reg_val &= 0xffffff7e;
+ mt76_wr(dev, 0x2124, reg_val);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz);
+
+ mt76_wr(dev, 0x2124, reg_val);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+ msleep(100);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+}
+
+void mt76x0_agc_save(struct mt76x0_dev *dev)
+{
+ /* Only one RX path */
+ dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
+}
+
+void mt76x0_agc_restore(struct mt76x0_dev *dev)
+{
+ mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
+}
+
+static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
+{
+ u8 rf_b7_73, rf_b0_66, rf_b0_67;
+ int cycle, temp;
+ u32 val;
+ s32 sval;
+
+ rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
+ rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
+ rf_b0_67 = rf_rr(dev, MT_RF(0, 73));
+
+ rf_wr(dev, MT_RF(7, 73), 0x02);
+ rf_wr(dev, MT_RF(0, 66), 0x23);
+ rf_wr(dev, MT_RF(0, 73), 0x01);
+
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
+
+ for (cycle = 0; cycle < 2000; cycle++) {
+ val = mt76_rr(dev, MT_BBP(CORE, 34));
+ if (!(val & 0x10))
+ break;
+ udelay(3);
+ }
+
+ if (cycle >= 2000) {
+ val &= ~0x10; /* sensor timed out; clear the busy bit */
+ mt76_wr(dev, MT_BBP(CORE, 34), val);
+ goto done;
+ }
+
+ sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+ if (!(sval & 0x80))
+ sval &= 0x7f; /* Positive */
+ else
+ sval |= 0xffffff00; /* Negative */
+
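+ /* The sensor reading scales at 3.5 degrees C per step around a 25 C reference. */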
+ temp = (35 * (sval - dev->ee->temp_off)) / 10 + 25;
+
+done:
+ rf_wr(dev, MT_RF(7, 73), rf_b7_73);
+ rf_wr(dev, MT_RF(0, 66), rf_b0_66);
+ rf_wr(dev, MT_RF(0, 73), rf_b0_67);
+}
+
+static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
+{
+ u32 val, init_vga;
+
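+ /* Start from the per-band default VGA gain and back it off when the average RSSI is strong. */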
+ init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
+ if (dev->avg_rssi > -60)
+ init_vga -= 0x20;
+ else if (dev->avg_rssi > -70)
+ init_vga -= 0x10;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 8));
+ val &= 0xFFFF80FF;
+ val |= init_vga << 8;
+ mt76_wr(dev, MT_BBP(AGC, 8), val);
+}
+
+static void mt76x0_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ cal_work.work);
+
+ mt76x0_dynamic_vga_tuning(dev);
+ mt76x0_temp_sensor(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+ struct ieee80211_bss_conf *info)
+{
+ /* Start/stop collecting beacon data */
+ spin_lock_bh(&dev->con_mon_lock);
+ ether_addr_copy(dev->ap_bssid, info->bssid);
+ dev->avg_rssi = 0;
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+}
+
+static void
+mt76x0_set_rx_chains(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~(BIT(3) | BIT(4));
+
+ if (dev->chainmask & BIT(1))
+ val |= BIT(3);
+
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+
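+ /* Read the register back to make sure the write has been posted. */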
+ mb();
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+}
+
+static void
+mt76x0_set_tx_dac(struct mt76x0_dev *dev)
+{
+ if (dev->chainmask & BIT(1))
+ mt76_set(dev, MT_BBP(TXBE, 5), 3);
+ else
+ mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+static void
+mt76x0_rf_init(struct mt76x0_dev *dev)
+{
+ int i;
+ u8 val;
+
+ RF_RANDOM_WRITE(dev, mt76x0_rf_central_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab);
+
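+ /* Load the RF defaults for 20 MHz bandwidth (plus the 2.4 GHz specific 20 MHz entries). */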
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i];
+
+ if (item->bw_band == RF_BW_20)
+ rf_wr(dev, item->rf_bank_reg, item->value);
+ else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+ rf_wr(dev, item->rf_bank_reg, item->value);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) {
+ rf_wr(dev,
+ mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+ /*
+ * Frequency calibration:
+ * E1: B0.R22<6:0>: xo_cxo<6:0>
+ * E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
+ */
+ rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF));
+ val = rf_rr(dev, MT_RF(0, 22));
+
+ /*
+ * Reset the DAC during power up:
+ * set B0.R73<7>=1, then clear it, then set B0.R73<7>=1 again.
+ */
+ val = rf_rr(dev, MT_RF(0, 73));
+ val |= 0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+ val &= ~0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+ val |= 0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+
+ /*
+ * vcocal_en: initiate the VCO calibration (the bit resets after completion).
+ * This must come at the end of the RF configuration.
+ */
+ rf_set(dev, MT_RF(0, 4), 0x80);
+}
+
+static void mt76x0_ant_select(struct mt76x0_dev *dev)
+{
+ /* Single antenna mode. */
+ mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
+ mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
+ mt76_clear(dev, MT_COEXCFG0, BIT(2));
+ mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
+}
+
+void mt76x0_phy_init(struct mt76x0_dev *dev)
+{
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
+
+ mt76x0_ant_select(dev);
+
+ mt76x0_rf_init(dev);
+
+ mt76x0_set_rx_chains(dev);
+ mt76x0_set_tx_dac(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
new file mode 100644
index 000000000000..2880a43c3cb0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
@@ -0,0 +1,81 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MT76X0_PHY_H_
+#define _MT76X0_PHY_H_
+
+#define RF_G_BAND 0x0100
+#define RF_A_BAND 0x0200
+#define RF_A_BAND_LB 0x0400
+#define RF_A_BAND_MB 0x0800
+#define RF_A_BAND_HB 0x1000
+#define RF_A_BAND_11J 0x2000
+
+#define RF_BW_20 1
+#define RF_BW_40 2
+#define RF_BW_10 4
+#define RF_BW_80 8
+
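+/* RF registers are addressed as (bank, reg) pairs packed into one offset. */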
+#define MT_RF(bank, reg) ((bank) << 16 | (reg))
+#define MT_RF_BANK(offset) ((offset) >> 16)
+#define MT_RF_REG(offset) ((offset) & 0xff)
+
+struct mt76x0_bbp_switch_item {
+ u16 bw_band;
+ struct mt76_reg_pair reg_pair;
+};
+
+struct mt76x0_rf_switch_item {
+ u32 rf_bank_reg;
+ u16 bw_band;
+ u8 value;
+};
+
+struct mt76x0_freq_item {
+ u8 channel;
+ u32 band;
+ u8 pllR37;
+ u8 pllR36;
+ u8 pllR35;
+ u8 pllR34;
+ u8 pllR33;
+ u8 pllR32_b7b5;
+ u8 pllR32_b4b0; /* PLL_DEN (Denominator - 8) */
+ u8 pllR31_b7b5;
+ u8 pllR31_b4b0; /* PLL_K (Numerator) */
+ u8 pllR30_b7; /* sdm_reset_n */
+ u8 pllR30_b6b2; /* sdmmash_prbs,sin */
+ u8 pllR30_b1; /* sdm_bp */
+ u16 pll_n; /* R30<0>, R29<7:0> (hex) */
+ u8 pllR28_b7b6; /* isi,iso */
+ u8 pllR28_b5b4; /* pfd_dly */
+ u8 pllR28_b3b2; /* clksel option */
+ u32 pll_sdm_k; /* R28<1:0>, R27<7:0>, R26<7:0> (hex) SDM_k */
+ u8 pllR24_b1b0; /* xo_div */
+};
+
+struct mt76x0_rate_pwr_item {
+ s8 mcs_power;
+ u8 rf_pa_mode;
+};
+
+struct mt76x0_rate_pwr_tab {
+ struct mt76x0_rate_pwr_item cck[4];
+ struct mt76x0_rate_pwr_item ofdm[8];
+ struct mt76x0_rate_pwr_item ht[8];
+ struct mt76x0_rate_pwr_item vht[10];
+ struct mt76x0_rate_pwr_item stbc[8];
+ struct mt76x0_rate_pwr_item mcs32;
+};
+
+#endif /* _MT76X0_PHY_H_ */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
new file mode 100644
index 000000000000..16bed4aaa242
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_COEXCFG3 0x004c
+
+#define MT_LDO_CTRL_0 0x006c
+#define MT_LDO_CTRL_1 0x0070
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_IOCFG_6 0x0124
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n) BIT(_n + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_WMM_CTRL 0x0230 /* MT76x0 */
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_USB_DMA_CFG 0x238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKEUP_EN BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_WL_LPK_EN BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+#if 0
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
+#endif
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+#define MT_RING_SIZE 0x10
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA 0x0430
+#define MT_TXQ_STA 0x0434
+#define MT_RF_CSR_CFG 0x0500
+#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR BIT(30)
+#define MT_RF_CSR_CFG_KICK BIT(31)
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_MISC 0x0518
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_COM_REG0 0x0730
+#define MT_COM_REG1 0x0734
+#define MT_COM_REG2 0x0738
+#define MT_COM_REG3 0x073C
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_FCE_SKIP_FS 0x0a6c
+
+#define MT_MAC_CSR0 0x1000
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
+
+#define MT_LED_CFG 0x102c
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TBTT_TIMER_CFG 0x1124
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+#define MT_TXOP_TRUN_EN GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+#define MT_TXOP_CTRL
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
+#define MT_HT_FBK_CFG0 0x1354
+#define MT_HT_FBK_CFG1 0x1358
+#define MT_LG_FBK_CFG0 0x135c
+#define MT_LG_FBK_CFG1 0x1360
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_PROT_RATE GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS BIT(16)
+#define MT_PROT_CTRL_CTS2SELF BIT(17)
+#define MT_PROT_NAV_SHORT BIT(18)
+#define MT_PROT_NAV_LONG BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
+#define MT_PROT_RTS_THR_EN BIT(26)
+#define MT_PROT_RATE_CCK_11 0x0003
+#define MT_PROT_RATE_OFDM_6 0x4000
+#define MT_PROT_RATE_OFDM_24 0x4004
+#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
+ ~MT_PROT_TXOP_ALLOW_MM40 & \
+ ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN 0x13c0
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_AUTO_RSP_CFG 0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
+
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+#define MT_HT_CTRL_CFG 0x1410
+#define MT_RX_PARSER_CFG 0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+
+#define MT_RX_STA_CNT0 0x1700
+#define MT_RX_STA_CNT1 0x1704
+#define MT_RX_STA_CNT2 0x1708
+#define MT_TX_STA_CNT0 0x170c
+#define MT_TX_STA_CNT1 0x1710
+#define MT_TX_STA_CNT2 0x1714
+
+/* Vendor driver defines content of the second word of STAT_FIFO as follows:
+ * MT_TX_STAT_FIFO_RATE GENMASK(26, 16)
+ * MT_TX_STAT_FIFO_ETXBF BIT(27)
+ * MT_TX_STAT_FIFO_SND BIT(28)
+ * MT_TX_STAT_FIFO_ITXBF BIT(29)
+ * However, tests show that b16-31 have the same layout as TXWI rate_ctl
+ * with rate set to rate at which frame was acked.
+ */
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT 0x171c
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+
+#define MT_MPDU_DENSITY_CNT 0x1740
+
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8)
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
+#define MT_SKEY_0(_bss, _idx) \
+ (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
+#define MT_SKEY_1(_bss, _idx) \
+ (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
+#define MT_SKEY(_bss, _idx) \
+ ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) \
+ (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) \
+ (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) \
+ ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+enum mt76_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
new file mode 100644
index 000000000000..8abdd3cd546d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
new file mode 100644
index 000000000000..8a752a09f2dc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76X0U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x0.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x0
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s "
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT "%04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(mt76x0_submit_urb,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u),
+ TP_STRUCT__entry(
+ DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = u->pipe;
+ __entry->len = u->transfer_buffer_length;
+ ),
+ TP_printk(DEV_PR_FMT "p:%08x len:%u",
+ DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
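+/* Build a throw-away urb on the stack so synchronous transfers can reuse the mt76x0_submit_urb tracepoint. */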
+#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({ \
+ struct urb u; \
+ u.pipe = __pipe; \
+ u.transfer_buffer_length = __len; \
+ trace_mt76x0_submit_urb(__dev, &u); \
+})
+
+TRACE_EVENT(mt76x0_mcu_msg_send,
+ TP_PROTO(struct mt76_dev *dev,
+ struct sk_buff *skb, u32 csum, bool resp),
+ TP_ARGS(dev, skb, csum, resp),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, info)
+ __field(u32, csum)
+ __field(bool, resp)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->info = *(u32 *)skb->data;
+ __entry->csum = csum;
+ __entry->resp = resp;
+ ),
+ TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+ DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt76x0_vend_req,
+ TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t buflen, int ret),
+ TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+ __field(u16, val) __field(u16, offset) __field(void*, buf)
+ __field(int, buflen) __field(int, ret)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = pipe;
+ __entry->req = req;
+ __entry->req_type = req_type;
+ __entry->val = val;
+ __entry->offset = offset;
+ __entry->buf = buf;
+ __entry->buflen = buflen;
+ __entry->ret = ret;
+ ),
+ TP_printk(DEV_PR_FMT
+ "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+ DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+ __entry->req_type, __entry->val, __entry->offset,
+ !!__entry->buf, __entry->buflen)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, bank)
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ __entry->bank = bank;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 val),
+ TP_ARGS(dev, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->val = val;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+ )
+);
+
+TRACE_EVENT(mt76x0_rx,
+ TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f),
+ TP_ARGS(dev, rxwi, f),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76x0_rxwi, rxwi)
+ __field(u32, fce_info)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->rxwi = *rxwi;
+ __entry->fce_info = f;
+ ),
+ TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG,
+ le32_to_cpu(__entry->rxwi.rxinfo),
+ le32_to_cpu(__entry->rxwi.ctl))
+);
+
+TRACE_EVENT(mt76x0_tx,
+ TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
+ struct mt76_sta *sta, struct mt76_txwi *h),
+ TP_ARGS(dev, skb, sta, h),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76_txwi, h)
+ __field(struct sk_buff *, skb)
+ __field(struct mt76_sta *, sta)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->h = *h;
+ __entry->skb = skb;
+ __entry->sta = sta;
+ ),
+ TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
+ "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+ __entry->skb, __entry->sta,
+ le16_to_cpu(__entry->h.flags),
+ le16_to_cpu(__entry->h.rate_ctl),
+ __entry->h.ack_ctl, __entry->h.wcid,
+ le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt76x0_tx_dma_done,
+ TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb),
+ TP_ARGS(dev, skb),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(struct sk_buff *, skb)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->skb = skb;
+ ),
+ TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt76x0_tx_status_cleaned,
+ TP_PROTO(struct mt76_dev *dev, int cleaned),
+ TP_ARGS(dev, cleaned),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, cleaned)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cleaned = cleaned;
+ ),
+ TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt76x0_tx_status,
+ TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2),
+ TP_ARGS(dev, stat1, stat2),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, stat1) __field(u32, stat2)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->stat1 = stat1;
+ __entry->stat2 = stat2;
+ ),
+ TP_printk(DEV_PR_FMT "%08x %08x",
+ DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt76x0_rx_dma_aggr,
+ TP_PROTO(struct mt76_dev *dev, int cnt, bool paged),
+ TP_ARGS(dev, cnt, paged),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, cnt)
+ __field(bool, paged)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cnt = cnt;
+ __entry->paged = paged;
+ ),
+ TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+ DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, mt76x0_set_key,
+ TP_PROTO(struct mt76_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(mt76x0_set_shared_key,
+ TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key),
+ TP_ARGS(dev, vid, key),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, vid)
+ __field(u8, key)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->vid = vid;
+ __entry->key = key;
+ ),
+ TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+ DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
new file mode 100644
index 000000000000..751b49c28ae5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+ int qid = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ return q2hwq(qid);
+}
+
+static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+ struct ieee80211_tx_info *info)
+{
+ int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+ skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+ if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+ mt76x0_remove_hdr_pad(skb);
+
+ skb_trim(skb, pkt_len);
+}
+
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ mt76x0_tx_skb_remove_dma_overhead(skb, info);
+
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock(&dev->mac_lock);
+ ieee80211_tx_status(dev->mt76.hw, skb);
+ spin_unlock(&dev->mac_lock);
+}
+
+static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76_txwi) + 4;
+ if (hdr_len % 4)
+ need_head += 2;
+
+ return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+ int pkt_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct mt76_txwi *txwi;
+ unsigned long flags;
+ u16 txwi_flags = 0;
+ u32 pkt_id;
+ u16 rate_ctl;
+ u8 nss;
+
+ txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ spin_lock_irqsave(&dev->mt76.lock, flags);
+ if (rate->idx < 0 || !rate->count) {
+ rate_ctl = wcid->tx_rate;
+ nss = wcid->tx_rate_nss;
+ } else {
+ rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss);
+ }
+ spin_unlock_irqrestore(&dev->mt76.lock, flags);
+
+ txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ pkt_id = 1;
+ } else {
+ pkt_id = 0;
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ pkt_id |= MT_TXWI_PKTID_PROBE;
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
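+ /* Derive the BA window size from the station's A-MPDU factor; rate probing frames are sent without aggregation. */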
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 7, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ ba_size = 0;
+ } else {
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU;
+ txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+ }
+
+ txwi->wcid = wcid->idx;
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(pkt_len);
+ txwi->pktid = pkt_id;
+
+ return txwi;
+}
+
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x0_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ struct mt76_sta *msta = NULL;
+ struct mt76_wcid *wcid = dev->mon_wcid;
+ struct mt76_txwi *txwi;
+ int pkt_len = skb->len;
+ int hw_q = skb2q(skb);
+
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+ if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) {
+ ieee80211_free_txskb(dev->mt76.hw, skb);
+ return;
+ }
+
+ if (sta) {
+ msta = (struct mt76_sta *) sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ wcid = &mvif->group_wcid;
+ }
+
+ txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+ if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q))
+ return;
+
+ trace_mt76x0_tx(&dev->mt76, skb, msta, txwi);
+}
+
+void mt76x0_tx_stat(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ stat_work.work);
+ struct mt76_tx_status stat;
+ unsigned long flags;
+ int cleaned = 0;
+ u8 update = 1;
+
+ while (!test_bit(MT76_REMOVED, &dev->mt76.state)) {
+ stat = mt76x0_mac_fetch_tx_status(dev);
+ if (!stat.valid)
+ break;
+
+ mt76x0_send_tx_status(dev, &stat, &update);
+
+ cleaned++;
+ }
+ trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ if (cleaned)
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+ else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(20));
+ else
+ clear_bit(MT76_READING_STATS, &dev->mt76.state);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+ u32 val;
+
+ /* TODO: should we do funny things with the parameters?
+ * See what mt76x0_set_default_edca() used to do in init.c.
+ */
+
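+ /* mac80211 passes CW values as (2^n - 1); the hardware wants the exponent, hence fls(). */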
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ WARN_ON(params->txop > 0xff);
+ WARN_ON(params->aifs > 0xf);
+ WARN_ON(cw_min > 0xf);
+ WARN_ON(cw_max > 0xf);
+
+ val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ /* TODO: based on the user-controlled EnableTxBurst variable the vendor
+ * driver sets a really long TXOP on AC0 (see connect.c:2009), but only
+ * while connected? When not connected it should be 0.
+ */
+ if (!hw_q)
+ val |= 0x60;
+ else
+ val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
+ mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
new file mode 100644
index 000000000000..54ae1f113be2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt76x0.h"
+#include "usb.h"
+#include "trace.h"
+
+static struct usb_device_id mt76x0_device_table[] = {
+ { USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */
+ { USB_DEVICE(0x13B1, 0x003E) }, /* Linksys AE6000 */
+ { USB_DEVICE(0x0E8D, 0x7610) }, /* Sabrent NTWLAC */
+ { USB_DEVICE(0x7392, 0xa711) }, /* Edimax 7711mac */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax / Elecom */
+ { USB_DEVICE(0x148f, 0x761a) }, /* TP-Link TL-WDN5200 */
+ { USB_DEVICE(0x148f, 0x760a) }, /* TP-Link unknown */
+ { USB_DEVICE(0x0b05, 0x17d1) }, /* Asus USB-AC51 */
+ { USB_DEVICE(0x0b05, 0x17db) }, /* Asus USB-AC50 */
+ { USB_DEVICE(0x0df6, 0x0075) }, /* Sitecom WLA-3100 */
+ { USB_DEVICE(0x2019, 0xab31) }, /* Planex GW-450D */
+ { USB_DEVICE(0x2001, 0x3d02) }, /* D-LINK DWA-171 rev B1 */
+ { USB_DEVICE(0x0586, 0x3425) }, /* Zyxel NWD6505 */
+ { USB_DEVICE(0x07b8, 0x7610) }, /* AboCom AU7212 */
+ { USB_DEVICE(0x04bb, 0x0951) }, /* I-O DATA WN-AC433UK */
+ { USB_DEVICE(0x057c, 0x8502) }, /* AVM FRITZ!WLAN USB Stick AC 430 */
+ { USB_DEVICE(0x293c, 0x5702) }, /* Comcast Xfinity KXW02AAA */
+ { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */
+ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
+ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac Stick */
+ { USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */
+ { 0, }
+};
+
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+ struct mt76x0_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+ buf->len = len;
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+ return !buf->urb || !buf->buf;
+}
+
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+ usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+ usb_free_urb(buf->urb);
+}
+
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+ struct mt76x0_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned pipe;
+ int ret;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]);
+ else
+ pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]);
+
+ usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+ complete_fn, context);
+ buf->urb->transfer_dma = buf->dma;
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ trace_mt76x0_submit_urb(&dev->mt76, buf->urb);
+ ret = usb_submit_urb(buf->urb, gfp);
+ if (ret)
+ dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+ dir, ep_idx, ret);
+ return ret;
+}
+
+void mt76x0_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
+{
+ int i, ret;
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ const unsigned int pipe = (direction == USB_DIR_IN) ?
+ usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ ret = usb_control_msg(usb_dev, pipe, req, req_type,
+ val, offset, buf, buflen,
+ MT_VEND_REQ_TOUT_MS);
+ trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset,
+ buf, buflen, ret);
+
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+
+ msleep(5);
+ }
+
+ dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+
+ return ret;
+}
+
+void mt76x0_vendor_reset(struct mt76x0_dev *dev)
+{
+ mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset)
+{
+ struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+ int ret;
+ u32 val = ~0;
+
+ WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ ret = mt76x0_vendor_request((struct mt76x0_dev *)dev, MT_VEND_MULTI_READ, USB_DIR_IN,
+ 0, offset, mdev->data, MT_VEND_BUF);
+ if (ret == MT_VEND_BUF)
+ val = get_unaligned_le32(mdev->data);
+ else if (ret > 0)
+ dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+ ret, offset);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+
+ trace_mt76x0_reg_read(dev, offset, val);
+ return val;
+}
+
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ struct mt76x0_dev *mdev = dev;
+ int ret;
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+ val & 0xffff, offset, NULL, 0);
+ if (!ret)
+ ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+ val >> 16, offset + 2, NULL, 0);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+ int ret;
+
+ WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ put_unaligned_le32(val, mdev->data);
+ ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT,
+ 0, offset, mdev->data, MT_VEND_BUF);
+ trace_mt76x0_reg_write(dev, offset, val);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+}
+
+static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt76x0_rr(dev, offset) & ~mask;
+ mt76x0_wr(dev, offset, val);
+ return val;
+}
+
+static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+ WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+ mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4);
+}
+
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
+{
+ mt76_wr(dev, offset, get_unaligned_le32(addr));
+ mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt76x0_assign_pipes(struct usb_interface *usb_intf,
+ struct mt76x0_dev *dev)
+{
+ struct usb_endpoint_descriptor *ep_desc;
+ struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+ unsigned i, ep_i = 0, ep_o = 0;
+
+ BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX);
+ BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX);
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ ep_i++ < __MT_EP_IN_MAX) {
+ dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc);
+ dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+ /* Note: this is ignored by the USB subsystem but the vendor
+ * code does it. We can drop this at some point.
+ */
+ dev->in_ep[ep_i - 1] |= USB_DIR_IN;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ ep_o++ < __MT_EP_OUT_MAX) {
+ dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc);
+ dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+ }
+ }
+
+ if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+ dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n",
+ ep_i, ep_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mt76x0_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+ struct mt76x0_dev *dev;
+ u32 asic_rev, mac_rev;
+ int ret;
+ static const struct mt76_bus_ops usb_ops = {
+ .rr = mt76x0_rr,
+ .wr = mt76x0_wr,
+ .rmw = mt76x0_rmw,
+ .copy = mt76x0_wr_copy,
+ };
+
+ dev = mt76x0_alloc_device(&usb_intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ usb_dev = usb_get_dev(usb_dev);
+ usb_reset_device(usb_dev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ dev->mt76.bus = &usb_ops;
+
+ ret = mt76x0_assign_pipes(usb_intf, dev);
+ if (ret)
+ goto err;
+
+ /* Disable the HW, otherwise the MCU fails to initialize on hot reboot */
+ mt76x0_chip_onoff(dev, false, false);
+
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+ mac_rev = mt76_rr(dev, MT_MAC_CSR0);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n",
+ asic_rev, mac_rev);
+
+ /* Note: vendor driver skips this check for MT76X0U */
+ if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+ dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
+
+ ret = mt76x0_init_hardware(dev);
+ if (ret)
+ goto err;
+
+ ret = mt76x0_register_device(dev);
+ if (ret)
+ goto err_hw;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ return 0;
+err_hw:
+ mt76x0_cleanup(dev);
+err:
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->mt76.hw);
+ return ret;
+}
+
+static void mt76x0_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ if (!initialized)
+ return;
+
+ ieee80211_unregister_hw(dev->mt76.hw);
+ mt76x0_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->mt76.hw);
+}
+
+static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+
+ mt76x0_cleanup(dev);
+
+ return 0;
+}
+
+static int mt76x0_resume(struct usb_interface *usb_intf)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ int ret;
+
+ ret = mt76x0_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
+MODULE_FIRMWARE(MT7610_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt76x0_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x0_device_table,
+ .probe = mt76x0_probe,
+ .disconnect = mt76x0_disconnect,
+ .suspend = mt76x0_suspend,
+ .resume = mt76x0_resume,
+ .reset_resume = mt76x0_resume,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x0_driver);
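Aside (not part of the patch): dongles based on the same chip are typically enabled with nothing more than a new entry in mt76x0_device_table; MODULE_DEVICE_TABLE() then regenerates the modalias data so udev/modprobe autoloading keeps working. A hypothetical entry, placed before the terminating { 0, }:

	{ USB_DEVICE(0x1234, 0xabcd) },	/* placeholder VID/PID, not a real device */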
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
new file mode 100644
index 000000000000..492e431390a8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_USB_H
+#define __MT76X0U_USB_H
+
+#include "mt76x0.h"
+
+#define MT7610_FIRMWARE "mediatek/mt7610u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+#define MT_VEND_DEV_MODE_RESET 1
+
+#define MT_VEND_BUF sizeof(__le32)
+
+static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0)
+{
+ return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev));
+}
+
+static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76)
+{
+ return interface_to_usbdev(to_usb_interface(mt76->dev));
+}
+
+static inline bool mt76x0_urb_has_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ENOENT &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN;
+}
+
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+ struct mt76x0_dma_buf *buf);
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf);
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+ struct mt76x0_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+void mt76x0_complete_urb(struct urb *urb);
+
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen);
+void mt76x0_vendor_reset(struct mt76x0_dev *dev);
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+
+#endif
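Aside (not part of the patch): a minimal usage sketch of the buffer helpers declared above, doing a synchronous bulk-out transfer completed through mt76x0_complete_urb. The endpoint index and timeout are placeholders, not values taken from the driver:

static int example_sync_bulk_out(struct mt76x0_dev *dev,
				 const void *data, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	struct mt76x0_dma_buf buf = {};
	int ret;

	/* mt76x0_usb_alloc_buf() returns true on allocation failure */
	if (mt76x0_usb_alloc_buf(dev, len, &buf)) {
		mt76x0_usb_free_buf(dev, &buf);	/* frees whatever part was allocated */
		return -ENOMEM;
	}

	memcpy(buf.buf, data, len);
	ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, 0 /* placeholder ep */,
				    &buf, GFP_KERNEL, mt76x0_complete_urb, &cmpl);
	if (!ret && !wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
		usb_kill_urb(buf.urb);
		ret = -ETIMEDOUT;
	}

	mt76x0_usb_free_buf(dev, &buf);
	return ret;
}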
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
new file mode 100644
index 000000000000..7856dd760419
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
+void mt76x0_remove_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ memmove(skb->data + 2, skb->data, len);
+ skb_pull(skb, 2);
+}
+
+int mt76x0_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+ int ret;
+
+ if (len % 4 == 0)
+ return 0;
+
+ ret = skb_cow(skb, 2);
+ if (ret)
+ return ret;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 0;
+}
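Aside (not part of the patch): these helpers keep the payload 4-byte aligned behind the 802.11 header for the hardware, with the two pad bytes sitting between header and payload. A worked example:

/*
 * Illustrative only:
 *   QoS data header  = 26 bytes, 26 % 4 == 2 -> two zero pad bytes inserted
 *   non-QoS data hdr = 24 bytes, 24 % 4 == 0 -> frame left untouched
 * mt76x0_remove_hdr_pad() strips the matching pad from received frames.
 */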
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
index dc12bbdbb2ee..dca3209bf5f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -27,11 +27,15 @@
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/kfifo.h>
+#include <linux/average.h>
#define MT7662_FIRMWARE "mt7662.bin"
#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
#define MT7662_EEPROM_SIZE 512
+#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
+#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
+
#define MT76x2_RX_RING_SIZE 256
#define MT_RX_HEADROOM 32
@@ -47,11 +51,14 @@
#include "mt76x2_mac.h"
#include "mt76x2_dfs.h"
+DECLARE_EWMA(signal, 10, 8)
+
struct mt76x2_mcu {
struct mutex mutex;
wait_queue_head_t wait;
struct sk_buff_head res_q;
+ struct mt76u_buf res_u;
u32 msg_seq;
};
@@ -69,9 +76,8 @@ struct mt76x2_calibration {
u8 agc_gain_init[MT_MAX_CHAINS];
u8 agc_gain_cur[MT_MAX_CHAINS];
- int avg_rssi[MT_MAX_CHAINS];
- int avg_rssi_all;
-
+ u16 false_cca;
+ s8 avg_rssi_all;
s8 agc_gain_adjust;
s8 low_gain;
@@ -120,10 +126,13 @@ struct mt76x2_dev {
u8 beacon_mask;
u8 beacon_data_mask;
- u32 rxfilter;
+ u8 tbtt_count;
+ u16 beacon_int;
u16 chainmask;
+ u32 rxfilter;
+
struct mt76x2_calibration cal;
s8 target_power;
@@ -149,8 +158,28 @@ struct mt76x2_sta {
struct mt76x2_vif *vif;
struct mt76x2_tx_status status;
int n_frames;
+
+ struct ewma_signal rssi;
+ int inactive_count;
};
+static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 500; i++) {
+ switch (mt76_rr(dev, MT_MAC_CSR0)) {
+ case 0:
+ case ~0:
+ break;
+ default:
+ return true;
+ }
+ usleep_range(5000, 10000);
+ }
+ return false;
+}
+
static inline bool is_mt7612(struct mt76x2_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612;
@@ -158,6 +187,14 @@ static inline bool is_mt7612(struct mt76x2_dev *dev)
void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
+static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+ return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
{
mt76x2_set_irq_mask(dev, 0, mask);
@@ -168,11 +205,29 @@ static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
mt76x2_set_irq_mask(dev, mask, 0);
}
+static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev)
+{
+ return mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 100);
+}
+
+static inline bool wait_for_wpdma(struct mt76x2_dev *dev)
+{
+ return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000);
+}
+
extern const struct ieee80211_ops mt76x2_ops;
+extern struct ieee80211_rate mt76x2_rates[12];
+
struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
int mt76x2_register_device(struct mt76x2_dev *dev);
void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+void mt76x2_init_device(struct mt76x2_dev *dev);
irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
void mt76x2_phy_power_on(struct mt76x2_dev *dev);
@@ -186,7 +241,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev);
int mt76x2_phy_start(struct mt76x2_dev *dev);
int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
struct cfg80211_chan_def *chandef);
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
void mt76x2_phy_calibrate(struct work_struct *work);
void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
@@ -214,6 +269,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
u32 *tx_info);
void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val);
void mt76x2_pre_tbtt_tasklet(unsigned long arg);
@@ -230,4 +286,45 @@ s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
+int mt76x2_insert_hdr_pad(struct sk_buff *skb);
+
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat);
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat, u8 *update);
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x2_dev *dev);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq);
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+ enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+ enum nl80211_band band, u8 bw);
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl);
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper);
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev);
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev);
+
#endif
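Aside (not part of the patch): DECLARE_EWMA(signal, 10, 8) above expands, via <linux/average.h>, into ewma_signal_init/add/read helpers for the new per-station rssi field: 10 fractional bits of fixed-point precision and a 1/8 weight for each new sample. A minimal sketch of feeding and reading such a field; the sign handling is an assumption of the sketch, not taken from this patch:

static void example_rssi_sample(struct mt76x2_sta *msta, s8 rssi_dbm)
{
	/* ewma_signal_add() takes an unsigned value, so store the magnitude */
	ewma_signal_add(&msta->rssi, (unsigned long)-rssi_dbm);
}

static int example_rssi_avg(struct mt76x2_sta *msta)
{
	/* ewma_signal_init() is done in mt76x2_sta_add(); negate back to dBm */
	return -(int)ewma_signal_read(&msta->rssi);
}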
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
new file mode 100644
index 000000000000..a2338ba139b4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return;
+
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+ if (txq->sta) {
+ struct mt76x2_sta *sta;
+
+ sta = (struct mt76x2_sta *) txq->sta->drv_priv;
+ mtxq->wcid = &sta->wcid;
+ } else {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+ mtxq->wcid = &mvif->group_wcid;
+ }
+
+ mt76_txq_init(&dev->mt76, txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_txq_init);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
+
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+ int i;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76x2_mac_wcid_set_drop(dev, idx, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76x2_txq_init(dev, sta->txq[i]);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+ ewma_signal_init(&msta->rssi);
+
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_add);
+
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76_txq_remove(&dev->mt76, sta->txq[i]);
+ mt76x2_mac_wcid_set_drop(dev, idx, true);
+ mt76_wcid_free(dev->wcid_mask, idx);
+ mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
+
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mt76_txq_remove(&dev->mt76, vif->txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
+
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x2_sta *msta;
+ struct mt76_wcid *wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * The hardware does not support per-STA RX GTK, fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+ wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ wcid->sw_iv = true;
+ }
+ } else {
+ if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ wcid->sw_iv = true;
+ }
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+EXPORT_SYMBOL_GPL(mt76x2_set_key);
+
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, qid;
+ u32 val;
+
+ qid = dev->mt76.q_tx[queue].hw_idx;
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+ FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
+
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
+
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+ struct ieee80211_tx_rate rate = {};
+
+ if (!rates)
+ return;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+ msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
+
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ void *rxwi = skb->data;
+
+ if (q == MT_RXQ_MCU) {
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
+ return;
+ }
+
+ skb_pull(skb, sizeof(struct mt76x2_rxwi));
+ if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ mt76_rx(&dev->mt76, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
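Aside (not part of the patch): MT_RX_FILTR_CFG is a drop mask, so the MT76_FILTER() macro in mt76x2_configure_filter() clears the matching drop bits whenever mac80211 asks for a FIF_* flag and sets them again when it does not. A worked example:

/*
 * Illustrative only, for MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR):
 *   FIF_FCSFAIL requested     -> CRC_ERR drop bit cleared -> frames with a bad
 *                                FCS are passed up (e.g. to a monitor interface)
 *   FIF_FCSFAIL not requested -> CRC_ERR drop bit set     -> frames with a bad
 *                                FCS are dropped by the MAC
 */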
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
index 955ea3e692dd..77b5ff1be05f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -91,12 +91,20 @@ mt76x2_dfs_stat_read(struct seq_file *file, void *data)
struct mt76x2_dev *dev = file->private;
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ seq_printf(file, "allocated sequences:\t%d\n",
+ dfs_pd->seq_stats.seq_pool_len);
+ seq_printf(file, "used sequences:\t\t%d\n",
+ dfs_pd->seq_stats.seq_len);
+ seq_puts(file, "\n");
+
for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
seq_printf(file, "engine: %d\n", i);
seq_printf(file, " hw pattern detected:\t%d\n",
dfs_pd->stats[i].hw_pattern);
seq_printf(file, " hw pulse discarded:\t%d\n",
dfs_pd->stats[i].hw_pulse_discarded);
+ seq_printf(file, " sw pattern detected:\t%d\n",
+ dfs_pd->stats[i].sw_pattern);
}
return 0;
@@ -115,6 +123,18 @@ static const struct file_operations fops_dfs_stat = {
.release = single_release,
};
+static int read_agc(struct seq_file *file, void *data)
+{
+ struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+
+ seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all);
+ seq_printf(file, "low_gain: %d\n", dev->cal.low_gain);
+ seq_printf(file, "false_cca: %d\n", dev->cal.false_cca);
+ seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust);
+
+ return 0;
+}
+
void mt76x2_init_debugfs(struct mt76x2_dev *dev)
{
struct dentry *dir;
@@ -130,4 +150,7 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev)
debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
read_txpower);
+
+ debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
}
+EXPORT_SYMBOL_GPL(mt76x2_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
index f936dc9a5476..374cc655c11d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
@@ -159,6 +159,81 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
mt76_wr(dev, MT_BBP(DFS, 36), data);
}
+static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_sequence *seq)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ list_add(&seq->head, &dfs_pd->seq_pool);
+
+ dfs_pd->seq_stats.seq_pool_len++;
+ dfs_pd->seq_stats.seq_len--;
+}
+
+static
+struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sequence *seq;
+
+ if (list_empty(&dfs_pd->seq_pool)) {
+ seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
+ } else {
+ seq = list_first_entry(&dfs_pd->seq_pool,
+ struct mt76x2_dfs_sequence,
+ head);
+ list_del(&seq->head);
+ dfs_pd->seq_stats.seq_pool_len--;
+ }
+ if (seq)
+ dfs_pd->seq_stats.seq_len++;
+
+ return seq;
+}
+
+static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
+{
+ int remainder, factor;
+
+ if (!frac)
+ return 0;
+
+ if (abs(val - frac) <= margin)
+ return 1;
+
+ factor = val / frac;
+ remainder = val % frac;
+
+ if (remainder > margin) {
+ if ((frac - remainder) <= margin)
+ factor++;
+ else
+ factor = 0;
+ }
+ return factor;
+}
+
+static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ int i;
+
+ /* reset hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+ /* reset sw detector */
+ for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+ dfs_pd->event_rb[i].h_rb = 0;
+ dfs_pd->event_rb[i].t_rb = 0;
+ }
+
+ list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+ list_del_init(&seq->head);
+ mt76x2_dfs_seq_pool_put(dev, seq);
+ }
+}
+
static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
{
bool ret = false;
@@ -295,6 +370,256 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
return ret;
}
+static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ u32 data;
+
+ /* 1st: DFS_R37[31]: 0 (engine 0) - 1 (engine 2)
+ * 2nd: DFS_R37[21:0]: pulse time
+ * 3rd: DFS_R37[11:0]: pulse width
+ * 3rd: DFS_R37[25:16]: phase
+ * 4th: DFS_R37[12:0]: current pwr
+ * 4th: DFS_R37[21:16]: pwr stable counter
+ *
+ * 1st: DFS_R37[31:0] set to 0xffffffff means no event detected
+ */
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ if (!MT_DFS_CHECK_EVENT(data))
+ return false;
+
+ event->engine = MT_DFS_EVENT_ENGINE(data);
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ event->ts = MT_DFS_EVENT_TIMESTAMP(data);
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ event->width = MT_DFS_EVENT_WIDTH(data);
+
+ return true;
+}
+
+static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ if (event->engine == 2) {
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
+ u16 last_event_idx;
+ u32 delta_ts;
+
+ last_event_idx = mt76_decr(event_buff->t_rb,
+ MT_DFS_EVENT_BUFLEN);
+ delta_ts = event->ts - event_buff->data[last_event_idx].ts;
+ if (delta_ts < MT_DFS_EVENT_TIME_MARGIN &&
+ event_buff->data[last_event_idx].width >= 200)
+ return false;
+ }
+ return true;
+}
+
+static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event_rb *event_buff;
+
+ /* add radar event to ring buffer */
+ event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
+ : &dfs_pd->event_rb[0];
+ event_buff->data[event_buff->t_rb] = *event;
+ event_buff->data[event_buff->t_rb].fetch_ts = jiffies;
+
+ event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN);
+ if (event_buff->t_rb == event_buff->h_rb)
+ event_buff->h_rb = mt76_incr(event_buff->h_rb,
+ MT_DFS_EVENT_BUFLEN);
+}
+
+static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event,
+ u16 cur_len)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sw_detector_params *sw_params;
+ u32 width_delta, with_sum, factor, cur_pri;
+ struct mt76x2_dfs_sequence seq, *seq_p;
+ struct mt76x2_dfs_event_rb *event_rb;
+ struct mt76x2_dfs_event *cur_event;
+ int i, j, end, pri;
+
+ event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
+ : &dfs_pd->event_rb[0];
+
+ i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN);
+ end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN);
+
+ while (i != end) {
+ cur_event = &event_rb->data[i];
+ with_sum = event->width + cur_event->width;
+
+ sw_params = &dfs_pd->sw_dpd_params;
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ case NL80211_DFS_JP:
+ if (with_sum < 600)
+ width_delta = 8;
+ else
+ width_delta = with_sum >> 3;
+ break;
+ case NL80211_DFS_ETSI:
+ if (event->engine == 2)
+ width_delta = with_sum >> 6;
+ else if (with_sum < 620)
+ width_delta = 24;
+ else
+ width_delta = 8;
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return -EINVAL;
+ }
+
+ pri = event->ts - cur_event->ts;
+ if (abs(event->width - cur_event->width) > width_delta ||
+ pri < sw_params->min_pri)
+ goto next;
+
+ if (pri > sw_params->max_pri)
+ break;
+
+ seq.pri = event->ts - cur_event->ts;
+ seq.first_ts = cur_event->ts;
+ seq.last_ts = event->ts;
+ seq.engine = event->engine;
+ seq.count = 2;
+
+ j = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+ while (j != end) {
+ cur_event = &event_rb->data[j];
+ cur_pri = event->ts - cur_event->ts;
+ factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri,
+ sw_params->pri_margin);
+ if (factor > 0) {
+ seq.first_ts = cur_event->ts;
+ seq.count++;
+ }
+
+ j = mt76_decr(j, MT_DFS_EVENT_BUFLEN);
+ }
+ if (seq.count <= cur_len)
+ goto next;
+
+ seq_p = mt76x2_dfs_seq_pool_get(dev);
+ if (!seq_p)
+ return -ENOMEM;
+
+ *seq_p = seq;
+ INIT_LIST_HEAD(&seq_p->head);
+ list_add(&seq_p->head, &dfs_pd->sequences);
+next:
+ i = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+ }
+ return 0;
+}
+
+static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sw_detector_params *sw_params;
+ struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ u16 max_seq_len = 0;
+ u32 factor, pri;
+
+ sw_params = &dfs_pd->sw_dpd_params;
+ list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+ if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
+ list_del_init(&seq->head);
+ mt76x2_dfs_seq_pool_put(dev, seq);
+ continue;
+ }
+
+ if (event->engine != seq->engine)
+ continue;
+
+ pri = event->ts - seq->last_ts;
+ factor = mt76x2_dfs_get_multiple(pri, seq->pri,
+ sw_params->pri_margin);
+ if (factor > 0) {
+ seq->last_ts = event->ts;
+ seq->count++;
+ max_seq_len = max_t(u16, max_seq_len, seq->count);
+ }
+ }
+ return max_seq_len;
+}
+
+static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sequence *seq;
+
+ if (list_empty(&dfs_pd->sequences))
+ return false;
+
+ list_for_each_entry(seq, &dfs_pd->sequences, head) {
+ if (seq->count > MT_DFS_SEQUENCE_TH) {
+ dfs_pd->stats[seq->engine].sw_pattern++;
+ return true;
+ }
+ }
+ return false;
+}
+
+static void mt76x2_dfs_add_events(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event event;
+ int i, seq_len;
+
+ /* disable debug mode */
+ mt76x2_dfs_set_capture_mode_ctrl(dev, false);
+ for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
+ if (!mt76x2_dfs_fetch_event(dev, &event))
+ break;
+
+ if (dfs_pd->last_event_ts > event.ts)
+ mt76x2_dfs_detector_reset(dev);
+ dfs_pd->last_event_ts = event.ts;
+
+ if (!mt76x2_dfs_check_event(dev, &event))
+ continue;
+
+ seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event);
+ mt76x2_dfs_create_sequence(dev, &event, seq_len);
+
+ mt76x2_dfs_queue_event(dev, &event);
+ }
+ mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+}
+
+static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event_rb *event_buff;
+ struct mt76x2_dfs_event *event;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+ event_buff = &dfs_pd->event_rb[i];
+
+ while (event_buff->h_rb != event_buff->t_rb) {
+ event = &event_buff->data[event_buff->h_rb];
+
+ /* sorted list */
+ if (time_is_after_jiffies(event->fetch_ts +
+ MT_DFS_EVENT_WINDOW))
+ break;
+ event_buff->h_rb = mt76_incr(event_buff->h_rb,
+ MT_DFS_EVENT_BUFLEN);
+ }
+ }
+}
+
static void mt76x2_dfs_tasklet(unsigned long arg)
{
struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
@@ -305,6 +630,24 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
if (test_bit(MT76_SCANNING, &dev->mt76.state))
goto out;
+ if (time_is_before_jiffies(dfs_pd->last_sw_check +
+ MT_DFS_SW_TIMEOUT)) {
+ bool radar_detected;
+
+ dfs_pd->last_sw_check = jiffies;
+
+ mt76x2_dfs_add_events(dev);
+ radar_detected = mt76x2_dfs_check_detection(dev);
+ if (radar_detected) {
+ /* sw detector rx radar pattern */
+ ieee80211_radar_detected(dev->mt76.hw);
+ mt76x2_dfs_detector_reset(dev);
+
+ return;
+ }
+ mt76x2_dfs_check_event_window(dev);
+ }
+
engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
if (!(engine_mask & 0xf))
goto out;
@@ -326,9 +669,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
/* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++;
ieee80211_radar_detected(dev->mt76.hw);
-
- /* reset hw detector */
- mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+ mt76x2_dfs_detector_reset(dev);
return;
}
@@ -340,6 +681,32 @@ out:
mt76x2_irq_enable(dev, MT_INT_GPTIMER);
}
+static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+ break;
+ case NL80211_DFS_ETSI:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2;
+ break;
+ case NL80211_DFS_JP:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ break;
+ }
+}
+
static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
{
u32 data;
@@ -462,6 +829,7 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->dfs_pd.region != NL80211_DFS_UNSET) {
+ mt76x2_dfs_init_sw_detector(dev);
mt76x2_dfs_set_bbp_params(dev);
/* enable debug mode */
mt76x2_dfs_set_capture_mode_ctrl(dev, true);
@@ -486,7 +854,10 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
{
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ INIT_LIST_HEAD(&dfs_pd->sequences);
+ INIT_LIST_HEAD(&dfs_pd->seq_pool);
dfs_pd->region = NL80211_DFS_UNSET;
+ dfs_pd->last_sw_check = jiffies;
tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
(unsigned long)dev);
}
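Aside (not part of the patch): the heart of the new software detector is mt76x2_dfs_get_multiple(), which checks whether a new pulse interval is an integer multiple of a candidate sequence PRI within a margin. Two worked cases with made-up numbers:

/*
 * Illustrative only (margin = MT_DFS_PRI_MARGIN = 4):
 *   val = 3002, frac = 1500: factor = 2, remainder = 2 <= 4    -> returns 2
 *   val = 3050, frac = 1500: remainder = 50 > 4, 1500 - 50 > 4 -> returns 0
 * A non-zero factor extends the candidate sequence (seq->count++); zero means
 * the pulse does not fit that sequence's PRI.
 */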
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
index 8dbc783cc6bc..693f421bf096 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
@@ -33,6 +33,22 @@
#define MT_DFS_PKT_END_MASK 0
#define MT_DFS_CH_EN 0xf
+/* sw detector params */
+#define MT_DFS_EVENT_LOOP 64
+#define MT_DFS_SW_TIMEOUT (HZ / 20)
+#define MT_DFS_EVENT_WINDOW (HZ / 5)
+#define MT_DFS_SEQUENCE_WINDOW (200 * (1 << 20))
+#define MT_DFS_EVENT_TIME_MARGIN 2000
+#define MT_DFS_PRI_MARGIN 4
+#define MT_DFS_SEQUENCE_TH 6
+
+#define MT_DFS_FCC_MAX_PRI ((28570 << 1) + 1000)
+#define MT_DFS_FCC_MIN_PRI (3000 - 2)
+#define MT_DFS_JP_MAX_PRI ((80000 << 1) + 1000)
+#define MT_DFS_JP_MIN_PRI (28500 - 2)
+#define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000)
+#define MT_DFS_ETSI_MIN_PRI (4500 - 20)
+
struct mt76x2_radar_specs {
u8 mode;
u16 avg_len;
@@ -50,6 +66,32 @@ struct mt76x2_radar_specs {
u16 pwr_jmp;
};
+#define MT_DFS_CHECK_EVENT(x) ((x) != GENMASK(31, 0))
+#define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 2 : 0)
+#define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0))
+#define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0))
+struct mt76x2_dfs_event {
+ unsigned long fetch_ts;
+ u32 ts;
+ u16 width;
+ u8 engine;
+};
+
+#define MT_DFS_EVENT_BUFLEN 256
+struct mt76x2_dfs_event_rb {
+ struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN];
+ int h_rb, t_rb;
+};
+
+struct mt76x2_dfs_sequence {
+ struct list_head head;
+ u32 first_ts;
+ u32 last_ts;
+ u32 pri;
+ u16 count;
+ u8 engine;
+};
+
struct mt76x2_dfs_hw_pulse {
u8 engine;
u32 period;
@@ -58,9 +100,21 @@ struct mt76x2_dfs_hw_pulse {
u32 burst;
};
+struct mt76x2_dfs_sw_detector_params {
+ u32 min_pri;
+ u32 max_pri;
+ u32 pri_margin;
+};
+
struct mt76x2_dfs_engine_stats {
u32 hw_pattern;
u32 hw_pulse_discarded;
+ u32 sw_pattern;
+};
+
+struct mt76x2_dfs_seq_stats {
+ u32 seq_pool_len;
+ u32 seq_len;
};
struct mt76x2_dfs_pattern_detector {
@@ -69,6 +123,16 @@ struct mt76x2_dfs_pattern_detector {
u8 chirp_pulse_cnt;
u32 chirp_pulse_ts;
+ struct mt76x2_dfs_sw_detector_params sw_dpd_params;
+ struct mt76x2_dfs_event_rb event_rb[2];
+
+ struct list_head sequences;
+ struct list_head seq_pool;
+ struct mt76x2_dfs_seq_stats seq_stats;
+
+ unsigned long last_sw_check;
+ u32 last_event_ts;
+
struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
struct tasklet_struct dfs_tasklet;
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
index fd1ec4743e0b..6720a6a1313f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -66,27 +66,6 @@ mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
return 0;
}
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- void *rxwi = skb->data;
-
- if (q == MT_RXQ_MCU) {
- skb_queue_tail(&dev->mcu.res_q, skb);
- wake_up(&dev->mcu.wait);
- return;
- }
-
- skb_pull(skb, sizeof(struct mt76x2_rxwi));
- if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
- dev_kfree_skb(skb);
- return;
- }
-
- mt76_rx(&dev->mt76, q, skb);
-}
-
static int
mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
index e9d426bbf91a..da294558c268 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -19,34 +19,6 @@
#include "dma.h"
-#define MT_TXD_INFO_LEN GENMASK(15, 0)
-#define MT_TXD_INFO_NEXT_VLD BIT(16)
-#define MT_TXD_INFO_TX_BURST BIT(17)
-#define MT_TXD_INFO_80211 BIT(19)
-#define MT_TXD_INFO_TSO BIT(20)
-#define MT_TXD_INFO_CSO BIT(21)
-#define MT_TXD_INFO_WIV BIT(24)
-#define MT_TXD_INFO_QSEL GENMASK(26, 25)
-#define MT_TXD_INFO_DPORT GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE GENMASK(31, 30)
-
-#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
-#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
-#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
-#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
-#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
-#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
-#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
-#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
-
-/* MCU request message header */
-#define MT_MCU_MSG_LEN GENMASK(15, 0)
-#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
-#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
-#define MT_MCU_MSG_PORT GENMASK(29, 27)
-#define MT_MCU_MSG_TYPE GENMASK(31, 30)
-#define MT_MCU_MSG_TYPE_CMD BIT(30)
-
enum mt76x2_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
@@ -54,14 +26,4 @@ enum mt76x2_qsel {
MT_QSEL_EDCA_2,
};
-enum dma_msg_port {
- WLAN_PORT,
- CPU_RX_PORT,
- CPU_TX_PORT,
- HOST_PORT,
- VIRTUAL_CPU_RX_PORT,
- VIRTUAL_CPU_TX_PORT,
- DISCARD,
-};
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
index 95d5f7d888f0..1753bcb36356 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -40,8 +40,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
return 0;
}
-static void
-mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
{
u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
@@ -58,6 +57,7 @@ mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
break;
}
}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap);
static int
mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
@@ -415,6 +415,7 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
}
+EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
static s8
mt76x2_rate_power_val(u8 val)
@@ -482,6 +483,7 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
val >>= 8;
t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
}
+EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
{
@@ -493,6 +495,7 @@ int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
return ret;
}
+EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power);
static void
mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
@@ -600,6 +603,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
t->delta_bw40 = mt76x2_rate_power_val(bw40);
t->delta_bw80 = mt76x2_rate_power_val(bw80);
}
+EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
{
@@ -632,6 +636,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
{
@@ -642,6 +647,7 @@ bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
else
return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
}
+EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled);
int mt76x2_eeprom_init(struct mt76x2_dev *dev)
{
@@ -658,3 +664,6 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
index aa0b0c040375..0f3e4d2f4fee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -155,6 +155,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev);
static inline bool
mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
index 79ab93613e06..b814391f79ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -19,39 +19,6 @@
#include "mt76x2_eeprom.h"
#include "mt76x2_mcu.h"
-struct mt76x2_reg_pair {
- u32 reg;
- u32 value;
-};
-
-static bool
-mt76x2_wait_for_mac(struct mt76x2_dev *dev)
-{
- int i;
-
- for (i = 0; i < 500; i++) {
- switch (mt76_rr(dev, MT_MAC_CSR0)) {
- case 0:
- case ~0:
- break;
- default:
- return true;
- }
- usleep_range(5000, 10000);
- }
-
- return false;
-}
-
-static bool
-wait_for_wpdma(struct mt76x2_dev *dev)
-{
- return mt76_poll(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
- MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
- 0, 1000);
-}
-
static void
mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
{
@@ -71,107 +38,6 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
}
static void
-mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
- const struct mt76x2_reg_pair *data, int len)
-{
- while (len > 0) {
- mt76_wr(dev, data->reg, data->value);
- len--;
- data++;
- }
-}
-
-static void
-mt76_write_mac_initvals(struct mt76x2_dev *dev)
-{
-#define DEFAULT_PROT_CFG \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
- MT_PROT_CFG_RTS_THRESH)
-
-#define DEFAULT_PROT_CFG_20 \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
- FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
-
-#define DEFAULT_PROT_CFG_40 \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
- FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
-
- static const struct mt76x2_reg_pair vals[] = {
- /* Copied from MediaTek reference source */
- { MT_PBF_SYS_CTRL, 0x00080c00 },
- { MT_PBF_CFG, 0x1efebcff },
- { MT_FCE_PSE_CTRL, 0x00000001 },
- { MT_MAC_SYS_CTRL, 0x0000000c },
- { MT_MAX_LEN_CFG, 0x003e3f00 },
- { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
- { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
- { MT_XIFS_TIME_CFG, 0x33a40d0a },
- { MT_BKOFF_SLOT_CFG, 0x00000209 },
- { MT_TBTT_SYNC_CFG, 0x00422010 },
- { MT_PWR_PIN_CFG, 0x00000000 },
- { 0x1238, 0x001700c8 },
- { MT_TX_SW_CFG0, 0x00101001 },
- { MT_TX_SW_CFG1, 0x00010000 },
- { MT_TX_SW_CFG2, 0x00000000 },
- { MT_TXOP_CTRL_CFG, 0x0400583f },
- { MT_TX_RTS_CFG, 0x00100020 },
- { MT_TX_TIMEOUT_CFG, 0x000a2290 },
- { MT_TX_RETRY_CFG, 0x47f01f0f },
- { MT_EXP_ACK_TIME, 0x002c00dc },
- { MT_TX_PROT_CFG6, 0xe3f42004 },
- { MT_TX_PROT_CFG7, 0xe3f42084 },
- { MT_TX_PROT_CFG8, 0xe3f42104 },
- { MT_PIFS_TX_CFG, 0x00060fff },
- { MT_RX_FILTR_CFG, 0x00015f97 },
- { MT_LEGACY_BASIC_RATE, 0x0000017f },
- { MT_HT_BASIC_RATE, 0x00004003 },
- { MT_PN_PAD_MODE, 0x00000003 },
- { MT_TXOP_HLDR_ET, 0x00000002 },
- { 0xa44, 0x00000000 },
- { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
- { MT_TSO_CTRL, 0x00000000 },
- { MT_AUX_CLK_CFG, 0x00000000 },
- { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
- { MT_TX_ALC_CFG_4, 0x00000000 },
- { MT_TX_ALC_VGA3, 0x00000000 },
- { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_8, 0x0000003a },
- { MT_TX_PWR_CFG_9, 0x0000003a },
- { MT_EFUSE_CTRL, 0x0000d000 },
- { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
- { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
- { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
- { MT_TX_SW_CFG3, 0x00000004 },
- { MT_HT_FBK_TO_LEGACY, 0x00001818 },
- { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
- { MT_PROT_AUTO_TX_CFG, 0x00830083 },
- { MT_HT_CTRL_CFG, 0x000001ff },
- };
- struct mt76x2_reg_pair prot_vals[] = {
- { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG },
- { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG },
- { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
- { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
- { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
- { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
- };
-
- mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
- mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
-}
-
-static void
mt76x2_fixup_xtal(struct mt76x2_dev *dev)
{
u16 eep_val;
@@ -360,41 +226,6 @@ int mt76x2_mac_start(struct mt76x2_dev *dev)
return 0;
}
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
-{
- bool stopped = false;
- u32 rts_cfg;
- int i;
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
-
- rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
-
- /* Wait for MAC to become idle */
- for (i = 0; i < 300; i++) {
- if ((mt76_rr(dev, MT_MAC_STATUS) &
- (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
- mt76_rr(dev, MT_BBP(IBI, 12))) {
- udelay(1);
- continue;
- }
-
- stopped = true;
- break;
- }
-
- if (force && !stopped) {
- mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
-
- mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
- }
-
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
-}
-
void mt76x2_mac_resume(struct mt76x2_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
@@ -498,45 +329,6 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
MT_TX_TIMEOUT_CFG_ACKTO, ackto);
}
-static void
-mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
-{
- u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
- if (enable)
- val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
- MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
- else
- val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
- MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
-
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-}
-
-static void
-mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
-{
- u32 val;
-
- val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
- val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
-
- if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
- val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-
- val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
- }
-
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-
- mt76x2_set_wlan_state(dev, enable);
-}
-
int mt76x2_init_hardware(struct mt76x2_dev *dev)
{
static const u16 beacon_offsets[16] = {
@@ -567,11 +359,6 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
(unsigned long) dev);
- dev->chainmask = 0x202;
- dev->global_wcid.idx = 255;
- dev->global_wcid.hw_key_idx = -1;
- dev->slottime = 9;
-
val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
MT_WPDMA_GLO_CFG_BIG_ENDIAN |
@@ -663,34 +450,6 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy,
mt76x2_dfs_set_domain(dev, request->dfs_region);
}
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
-}
-
-static struct ieee80211_rate mt76x2_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(0, 60),
- OFDM_RATE(1, 90),
- OFDM_RATE(2, 120),
- OFDM_RATE(3, 180),
- OFDM_RATE(4, 240),
- OFDM_RATE(5, 360),
- OFDM_RATE(6, 480),
- OFDM_RATE(7, 540),
-};
-
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@@ -767,37 +526,6 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
mt76x2_led_set_config(mt76, 0xff, 0);
}
-static void
-mt76x2_init_txpower(struct mt76x2_dev *dev,
- struct ieee80211_supported_band *sband)
-{
- struct ieee80211_channel *chan;
- struct mt76x2_tx_power_info txp;
- struct mt76_rate_power t = {};
- int target_power;
- int i;
-
- for (i = 0; i < sband->n_channels; i++) {
- chan = &sband->channels[i];
-
- mt76x2_get_power_info(dev, &txp, chan);
-
- target_power = max_t(int, (txp.chain[0].target_power +
- txp.chain[0].delta),
- (txp.chain[1].target_power +
- txp.chain[1].delta));
-
- mt76x2_get_rate_power(dev, &t, chan);
-
- chan->max_power = mt76x2_get_max_rate_power(&t) +
- target_power;
- chan->max_power /= 2;
-
- /* convert to combined output power on 2x2 devices */
- chan->max_power += 3;
- }
-}
-
int mt76x2_register_device(struct mt76x2_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -812,20 +540,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
return -ENOMEM;
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+ mt76x2_init_device(dev);
ret = mt76x2_init_hardware(dev);
if (ret)
return ret;
- hw->queues = 4;
- hw->max_rates = 1;
- hw->max_report_rates = 7;
- hw->max_rate_tries = 1;
- hw->extra_tx_headroom = 2;
-
- hw->sta_data_size = sizeof(struct mt76x2_sta);
- hw->vif_data_size = sizeof(struct mt76x2_vif);
-
for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
u8 *addr = dev->macaddr_list[i].addr;
@@ -845,16 +568,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
wiphy->reg_notifier = mt76x2_regd_notifier;
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
-
- ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
- INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
- INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
- dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
mt76x2_dfs_init_detector(dev);
@@ -862,9 +584,6 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
- /* init antenna configuration */
- dev->mt76.antenna_mask = 3;
-
ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
ARRAY_SIZE(mt76x2_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
new file mode 100644
index 000000000000..324b2a4b8b67
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+struct ieee80211_rate mt76x2_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+EXPORT_SYMBOL_GPL(mt76x2_rates);
+
+struct mt76x2_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+}
+
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x2_set_wlan_state(dev, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+ const struct mt76x2_reg_pair *data, int len)
+{
+ while (len > 0) {
+ mt76_wr(dev, data->reg, data->value);
+ len--;
+ data++;
+ }
+}
+
+void mt76_write_mac_initvals(struct mt76x2_dev *dev)
+{
+#define DEFAULT_PROT_CFG_CCK \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_OFDM \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+ static const struct mt76x2_reg_pair vals[] = {
+ /* Copied from MediaTek reference source */
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x1efebcff },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_MAC_SYS_CTRL, 0x0000000c },
+ { MT_MAX_LEN_CFG, 0x003e3f00 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
+ { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
+ { MT_XIFS_TIME_CFG, 0x33a40d0a },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TBTT_SYNC_CFG, 0x00422010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+ { 0x1238, 0x001700c8 },
+ { MT_TX_SW_CFG0, 0x00101001 },
+ { MT_TX_SW_CFG1, 0x00010000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_TXOP_CTRL_CFG, 0x0400583f },
+ { MT_TX_RTS_CFG, 0x00100020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2290 },
+ { MT_TX_RETRY_CFG, 0x47f01f0f },
+ { MT_EXP_ACK_TIME, 0x002c00dc },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_PIFS_TX_CFG, 0x00060fff },
+ { MT_RX_FILTR_CFG, 0x00015f97 },
+ { MT_LEGACY_BASIC_RATE, 0x0000017f },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_AUX_CLK_CFG, 0x00000000 },
+ { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
+ { MT_TX_ALC_CFG_4, 0x00000000 },
+ { MT_TX_ALC_VGA3, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_8, 0x0000003a },
+ { MT_TX_PWR_CFG_9, 0x0000003a },
+ { MT_EFUSE_CTRL, 0x0000d000 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
+ { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
+ { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
+ { MT_TX_SW_CFG3, 0x00000004 },
+ { MT_HT_FBK_TO_LEGACY, 0x00001818 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+ { MT_PROT_AUTO_TX_CFG, 0x00830083 },
+ { MT_HT_CTRL_CFG, 0x000001ff },
+ };
+ struct mt76x2_reg_pair prot_vals[] = {
+ { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK },
+ { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM },
+ { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ };
+
+ mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+ mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
+
+void mt76x2_init_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ hw->queues = 4;
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+ hw->extra_tx_headroom = 2;
+
+ hw->sta_data_size = sizeof(struct mt76x2_sta);
+ hw->vif_data_size = sizeof(struct mt76x2_vif);
+
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ dev->chainmask = 0x202;
+ dev->global_wcid.idx = 255;
+ dev->global_wcid.hw_key_idx = -1;
+ dev->slottime = 9;
+
+ /* init antenna configuration */
+ dev->mt76.antenna_mask = 3;
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_device);
+
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76_rate_power t = {};
+ int target_power;
+ int i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ target_power = max_t(int, (txp.chain[0].target_power +
+ txp.chain[0].delta),
+ (txp.chain[1].target_power +
+ txp.chain[1].delta));
+
+ mt76x2_get_rate_power(dev, &t, chan);
+
+ chan->max_power = mt76x2_get_max_rate_power(&t) +
+ target_power;
+ chan->max_power /= 2;
+
+ /* convert to combined output power on 2x2 devices */
+ chan->max_power += 3;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
index b49aea4da2d6..23cf437d14f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -28,500 +28,12 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
get_unaligned_le16(addr + 4));
}
-static int
-mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (idx >= 8)
- idx = 0;
-
- if (status->band == NL80211_BAND_2GHZ)
- idx += 4;
-
- status->rate_idx = idx;
- return 0;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8) {
- idx -= 8;
- status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
- }
-
- if (idx >= 4)
- idx = 0;
-
- status->rate_idx = idx;
- return 0;
- case MT_PHY_TYPE_HT_GF:
- status->enc_flags |= RX_ENC_FLAG_HT_GF;
- /* fall through */
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- status->rate_idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- status->encoding = RX_ENC_VHT;
- status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
- status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
- break;
- default:
- return -EINVAL;
- }
-
- if (rate & MT_RXWI_RATE_LDPC)
- status->enc_flags |= RX_ENC_FLAG_LDPC;
-
- if (rate & MT_RXWI_RATE_SGI)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
- if (rate & MT_RXWI_RATE_STBC)
- status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- status->bw = RATE_INFO_BW_40;
- break;
- case MT_PHY_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static __le16
-mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val)
-{
- u16 rateval;
- u8 phy, rate_idx;
- u8 nss = 1;
- u8 bw = 0;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 4);
- phy = MT_PHY_TYPE_VHT;
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- bw = 2;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 3);
- phy = MT_PHY_TYPE_HT;
- if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
- phy = MT_PHY_TYPE_HT_GF;
- if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else {
- const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
- u16 val;
-
- r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
- if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- val = r->hw_value_short;
- else
- val = r->hw_value;
-
- phy = val >> 8;
- rate_idx = val & 0xff;
- bw = 0;
- }
-
- rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
- rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
- rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rateval |= MT_RXWI_RATE_SGI;
-
- *nss_val = nss;
- return cpu_to_le16(rateval);
-}
-
-void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
-{
- u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
- u32 bit = MT_WCID_DROP_MASK(idx);
-
- /* prevent unnecessary writes */
- if ((val & bit) != (bit * drop))
- mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
-}
-
-void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate)
-{
- spin_lock_bh(&dev->mt76.lock);
- wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
- wcid->tx_rate_set = true;
- spin_unlock_bh(&dev->mt76.lock);
-}
-
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rate = &info->control.rates[0];
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
- u16 txwi_flags = 0;
- u8 nss;
- s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8];
-
- memset(txwi, 0, sizeof(*txwi));
-
- if (wcid)
- txwi->wcid = wcid->idx;
- else
- txwi->wcid = 0xff;
-
- txwi->pktid = 1;
-
- if (wcid && wcid->sw_iv && key) {
- u64 pn = atomic64_inc_return(&key->tx_pn);
- ccmp_pn[0] = pn;
- ccmp_pn[1] = pn >> 8;
- ccmp_pn[2] = 0;
- ccmp_pn[3] = 0x20 | (key->keyidx << 6);
- ccmp_pn[4] = pn >> 16;
- ccmp_pn[5] = pn >> 24;
- ccmp_pn[6] = pn >> 32;
- ccmp_pn[7] = pn >> 40;
- txwi->iv = *((__le32 *)&ccmp_pn[0]);
- txwi->eiv = *((__le32 *)&ccmp_pn[1]);
- }
-
- spin_lock_bh(&dev->mt76.lock);
- if (wcid && (rate->idx < 0 || !rate->count)) {
- txwi->rate = wcid->tx_rate;
- max_txpwr_adj = wcid->max_txpwr_adj;
- nss = wcid->tx_rate_nss;
- } else {
- txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
- max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
- }
- spin_unlock_bh(&dev->mt76.lock);
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
- max_txpwr_adj);
- txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
-
- if (mt76xx_rev(dev) >= MT76XX_REV_E4)
- txwi->txstream = 0x13;
- else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
- !(txwi->rate & cpu_to_le16(rate_ht_mask)))
- txwi->txstream = 0x93;
-
- if (info->flags & IEEE80211_TX_CTL_LDPC)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
- if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- txwi_flags |= MT_TXWI_FLAGS_MMPS;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- txwi->pktid |= MT_TXWI_PKTID_PROBE;
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
- u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
- ba_size <<= sta->ht_cap.ampdu_factor;
- ba_size = min_t(int, 63, ba_size - 1);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- ba_size = 0;
- txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
-
- txwi_flags |= MT_TXWI_FLAGS_AMPDU |
- FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density);
- }
-
- if (ieee80211_is_probe_resp(hdr->frame_control) ||
- ieee80211_is_beacon(hdr->frame_control))
- txwi_flags |= MT_TXWI_FLAGS_TS;
-
- txwi->flags |= cpu_to_le16(txwi_flags);
- txwi->len_ctl = cpu_to_le16(skb->len);
-}
-
-static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
-{
- int hdrlen;
-
- if (!len)
- return;
-
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- memmove(skb->data + len, skb->data, hdrlen);
- skb_pull(skb, len);
-}
-
-static struct mt76_wcid *
-mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast)
-{
- struct mt76x2_sta *sta;
- struct mt76_wcid *wcid;
-
- if (idx >= ARRAY_SIZE(dev->wcid))
- return NULL;
-
- wcid = rcu_dereference(dev->wcid[idx]);
- if (unicast || !wcid)
- return wcid;
-
- sta = container_of(wcid, struct mt76x2_sta, wcid);
- return &sta->vif->group_wcid;
-}
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *rxi)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
- struct mt76x2_rxwi *rxwi = rxi;
- u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
- u32 ctl = le32_to_cpu(rxwi->ctl);
- u16 rate = le16_to_cpu(rxwi->rate);
- u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
- bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0;
- u8 pn_len;
- u8 wcid;
- int len;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return -EINVAL;
-
- if (rxinfo & MT_RXINFO_L2PAD)
- pad_len += 2;
-
- if (rxinfo & MT_RXINFO_DECRYPT) {
- status->flag |= RX_FLAG_DECRYPTED;
- status->flag |= RX_FLAG_MMIC_STRIPPED;
- status->flag |= RX_FLAG_MIC_STRIPPED;
- status->flag |= RX_FLAG_IV_STRIPPED;
- }
-
- wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
- status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast);
-
- len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
- pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
- if (pn_len) {
- int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
- u8 *data = skb->data + offset;
-
- status->iv[0] = data[7];
- status->iv[1] = data[6];
- status->iv[2] = data[5];
- status->iv[3] = data[4];
- status->iv[4] = data[1];
- status->iv[5] = data[0];
-
- /*
- * Driver CCMP validation can't deal with fragments.
- * Let mac80211 take care of it.
- */
- if (rxinfo & MT_RXINFO_FRAG) {
- status->flag &= ~RX_FLAG_IV_STRIPPED;
- } else {
- pad_len += pn_len << 2;
- len -= pn_len << 2;
- }
- }
-
- mt76x2_remove_hdr_pad(skb, pad_len);
-
- if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
- status->aggr = true;
-
- if (WARN_ON_ONCE(len > skb->len))
- return -EINVAL;
-
- pskb_trim(skb, len);
- status->chains = BIT(0) | BIT(1);
- status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
- status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
- status->signal = max(status->chain_signal[0], status->chain_signal[1]);
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
-
- status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
- status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
-
- return mt76x2_mac_process_rate(status, rate);
-}
-
-static int
-mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
- enum nl80211_band band)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- txrate->idx = 0;
- txrate->flags = 0;
- txrate->count = 1;
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (band == NL80211_BAND_2GHZ)
- idx += 4;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8)
- idx -= 8;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_HT_GF:
- txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
- case MT_PHY_TYPE_HT:
- txrate->flags |= IEEE80211_TX_RC_MCS;
- txrate->idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
- txrate->idx = idx;
- break;
- default:
- return -EINVAL;
- }
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- break;
- case MT_PHY_BW_80:
- txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
- break;
- default:
- return -EINVAL;
- }
-
- if (rate & MT_RXWI_RATE_SGI)
- txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
-
- return 0;
-}
-
-static void
-mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
- struct ieee80211_tx_info *info,
- struct mt76x2_tx_status *st, int n_frames)
-{
- struct ieee80211_tx_rate *rate = info->status.rates;
- int cur_idx, last_rate;
- int i;
-
- if (!n_frames)
- return;
-
- last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
- mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
- dev->mt76.chandef.chan->band);
- if (last_rate < IEEE80211_TX_MAX_RATES - 1)
- rate[last_rate + 1].idx = -1;
-
- cur_idx = rate[last_rate].idx + st->retry;
- for (i = 0; i <= last_rate; i++) {
- rate[i].flags = rate[last_rate].flags;
- rate[i].idx = max_t(int, 0, cur_idx - i);
- rate[i].count = 1;
- }
-
- if (last_rate > 0)
- rate[last_rate - 1].count = st->retry + 1 - last_rate;
-
- info->status.ampdu_len = n_frames;
- info->status.ampdu_ack_len = st->success ? n_frames : 0;
-
- if (st->pktid & MT_TXWI_PKTID_PROBE)
- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
- if (st->aggr)
- info->flags |= IEEE80211_TX_CTL_AMPDU |
- IEEE80211_TX_STAT_AMPDU;
-
- if (!st->ack_req)
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
- else if (st->success)
- info->flags |= IEEE80211_TX_STAT_ACK;
-}
-
-static void
-mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
- u8 *update)
-{
- struct ieee80211_tx_info info = {};
- struct ieee80211_sta *sta = NULL;
- struct mt76_wcid *wcid = NULL;
- struct mt76x2_sta *msta = NULL;
-
- rcu_read_lock();
- if (stat->wcid < ARRAY_SIZE(dev->wcid))
- wcid = rcu_dereference(dev->wcid[stat->wcid]);
-
- if (wcid) {
- void *priv;
-
- priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
- sta = container_of(priv, struct ieee80211_sta,
- drv_priv);
- }
-
- if (msta && stat->aggr) {
- u32 stat_val, stat_cache;
-
- stat_val = stat->rate;
- stat_val |= ((u32) stat->retry) << 16;
- stat_cache = msta->status.rate;
- stat_cache |= ((u32) msta->status.retry) << 16;
-
- if (*update == 0 && stat_val == stat_cache &&
- stat->wcid == msta->status.wcid && msta->n_frames < 32) {
- msta->n_frames++;
- goto out;
- }
-
- mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
- msta->n_frames);
-
- msta->status = *stat;
- msta->n_frames = 1;
- *update = 0;
- } else {
- mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
- *update = 1;
- }
-
- ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
-
-out:
- rcu_read_unlock();
-}
-
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
struct mt76x2_tx_status stat = {};
unsigned long flags;
u8 update = 1;
+ bool ret;
if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
return;
@@ -529,26 +41,13 @@ void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
trace_mac_txstat_poll(dev);
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
- u32 stat1, stat2;
-
spin_lock_irqsave(&dev->irq_lock, flags);
- stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
- stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
- if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
- spin_unlock_irqrestore(&dev->irq_lock, flags);
- break;
- }
-
+ ret = mt76x2_mac_load_tx_status(dev, &stat);
spin_unlock_irqrestore(&dev->irq_lock, flags);
- stat.valid = 1;
- stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
- stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
- stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
- stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
- stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
- stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
- stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+ if (!ret)
+ break;
+
trace_mac_txstat_fetch(dev, &stat);
if (!irq) {
@@ -597,104 +96,6 @@ void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
dev_kfree_skb_any(e->skb);
}
-static enum mt76x2_cipher_type
-mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
-{
- memset(key_data, 0, 32);
- if (!key)
- return MT_CIPHER_NONE;
-
- if (key->keylen > 32)
- return MT_CIPHER_NONE;
-
- memcpy(key_data, key->key, key->keylen);
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
- default:
- return MT_CIPHER_NONE;
- }
-}
-
-void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
-{
- struct mt76_wcid_addr addr = {};
- u32 attr;
-
- attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
- FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
-
- mt76_wr(dev, MT_WCID_ATTR(idx), attr);
-
- mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
- mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
-
- if (idx >= 128)
- return;
-
- if (mac)
- memcpy(addr.macaddr, mac, ETH_ALEN);
-
- mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
-}
-
-int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76x2_cipher_type cipher;
- u8 key_data[32];
- u8 iv_data[8];
-
- cipher = mt76x2_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EOPNOTSUPP;
-
- mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
- mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
-
- memset(iv_data, 0, sizeof(iv_data));
- if (key) {
- mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
- !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
- iv_data[3] = key->keyidx << 6;
- if (cipher >= MT_CIPHER_TKIP)
- iv_data[3] |= 0x20;
- }
-
- mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
-
- return 0;
-}
-
-int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76x2_cipher_type cipher;
- u8 key_data[32];
- u32 val;
-
- cipher = mt76x2_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EOPNOTSUPP;
-
- val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
- val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
- val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
- mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
-
- mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
- sizeof(key_data));
-
- return 0;
-}
-
static int
mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
{
@@ -704,7 +105,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
return -ENOSPC;
- mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+ mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
@@ -839,3 +240,33 @@ void mt76x2_mac_work(struct work_struct *work)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
}
+
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
+{
+ u32 data = 0;
+
+ if (val != ~0)
+ data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
+ MT_PROT_CFG_RTS_THRESH;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
+
+ mt76_rmw(dev, MT_CCK_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_OFDM_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG6,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG7,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG8,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
index c048cd06df6b..5af0107ba748 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -166,7 +166,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
void *rxi);
void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+ struct ieee80211_sta *sta, int len);
void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
new file mode 100644
index 000000000000..6542644bc325
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
+{
+ bool stopped = false;
+ u32 rts_cfg;
+ int i;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 300; i++) {
+ if ((mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+ mt76_rr(dev, MT_BBP(IBI, 12))) {
+ udelay(1);
+ continue;
+ }
+
+ stopped = true;
+ break;
+ }
+
+ if (force && !stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
+
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat)
+{
+ u32 stat1, stat2;
+
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ if (!stat->valid)
+ return false;
+
+ stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status);
+
+static int
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ return 0;
+}
+
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+ struct ieee80211_tx_info *info,
+ struct mt76x2_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + last_rate;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+ rate[last_rate].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat, u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76x2_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ void *priv;
+
+ priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76x2_send_tx_status);
+
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+ sizeof(key_data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup);
+
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP)
+ iv_data[3] |= 0x20;
+ }
+
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key);
+
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
+
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ spin_lock_bh(&dev->mt76.lock);
+ wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_bh(&dev->mt76.lock);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate);
+
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u16 txwi_flags = 0;
+ u8 nss;
+ s8 txpwr_adj, max_txpwr_adj;
+ u8 ccmp_pn[8];
+
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (wcid)
+ txwi->wcid = wcid->idx;
+ else
+ txwi->wcid = 0xff;
+
+ txwi->pktid = 1;
+
+ if (wcid && wcid->sw_iv && key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+ ccmp_pn[0] = pn;
+ ccmp_pn[1] = pn >> 8;
+ ccmp_pn[2] = 0;
+ ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+ ccmp_pn[4] = pn >> 16;
+ ccmp_pn[5] = pn >> 24;
+ ccmp_pn[6] = pn >> 32;
+ ccmp_pn[7] = pn >> 40;
+ txwi->iv = *((__le32 *)&ccmp_pn[0]);
+ txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+ }
+
+ spin_lock_bh(&dev->mt76.lock);
+ if (wcid && (rate->idx < 0 || !rate->count)) {
+ txwi->rate = wcid->tx_rate;
+ max_txpwr_adj = wcid->max_txpwr_adj;
+ nss = wcid->tx_rate_nss;
+ } else {
+ txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+ max_txpwr_adj);
+ txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+ txwi->txstream = 0x13;
+ else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+ !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+ txwi->txstream = 0x93;
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->pktid |= MT_TXWI_PKTID_PROBE;
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control))
+ txwi_flags |= MT_TXWI_FLAGS_TS;
+
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(len);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
+
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 bit = MT_WCID_DROP_MASK(idx);
+
+ /* prevent unnecessary writes */
+ if ((val & bit) != (bit * drop))
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop);
+
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ struct mt76_wcid_addr addr = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+ if (idx >= 128)
+ return;
+
+ if (mac)
+ memcpy(addr.macaddr, mac, ETH_ALEN);
+
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup);
+
+static int
+mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+ int hdrlen;
+
+ if (!len)
+ return;
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + len, skb->data, hdrlen);
+ skb_pull(skb, len);
+}
+
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+ struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+ rssi += cal->rssi_offset[chain];
+ rssi -= cal->lna_gain;
+
+ return rssi;
+}
+
+static struct mt76x2_sta *
+mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
+{
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->wcid[idx]);
+ if (!wcid)
+ return NULL;
+
+ return container_of(wcid, struct mt76x2_sta, wcid);
+}
+
+static struct mt76_wcid *
+mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta,
+ bool unicast)
+{
+ if (!sta)
+ return NULL;
+
+ if (unicast)
+ return &sta->wcid;
+ else
+ return &sta->vif->group_wcid;
+}
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *rxi)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76x2_rxwi *rxwi = rxi;
+ struct mt76x2_sta *sta;
+ u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+ bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+ int pad_len = 0;
+ u8 pn_len;
+ u8 wcid;
+ int len;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return -EINVAL;
+
+ if (rxinfo & MT_RXINFO_L2PAD)
+ pad_len += 2;
+
+ if (rxinfo & MT_RXINFO_DECRYPT) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+ status->flag |= RX_FLAG_MIC_STRIPPED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+ sta = mt76x2_rx_get_sta(dev, wcid);
+ status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+ if (pn_len) {
+ int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+ u8 *data = skb->data + offset;
+
+ status->iv[0] = data[7];
+ status->iv[1] = data[6];
+ status->iv[2] = data[5];
+ status->iv[3] = data[4];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ /*
+ * Driver CCMP validation can't deal with fragments.
+ * Let mac80211 take care of it.
+ */
+ if (rxinfo & MT_RXINFO_FRAG) {
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+ } else {
+ pad_len += pn_len << 2;
+ len -= pn_len << 2;
+ }
+ }
+
+ mt76x2_remove_hdr_pad(skb, pad_len);
+
+ if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
+ status->aggr = true;
+
+ if (WARN_ON_ONCE(len > skb->len))
+ return -EINVAL;
+
+ pskb_trim(skb, len);
+ status->chains = BIT(0) | BIT(1);
+ status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
+ status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
+ status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+ status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+ if (sta) {
+ ewma_signal_add(&sta->rssi, status->signal);
+ sta->inactive_count = 0;
+ }
+
+ return mt76x2_mac_process_rate(status, rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
index ce90ff999b49..680a89f8aa87 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -53,30 +53,6 @@ mt76x2_stop(struct ieee80211_hw *hw)
mutex_unlock(&dev->mutex);
}
-static void
-mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
-{
- struct mt76_txq *mtxq;
-
- if (!txq)
- return;
-
- mtxq = (struct mt76_txq *) txq->drv_priv;
- if (txq->sta) {
- struct mt76x2_sta *sta;
-
- sta = (struct mt76x2_sta *) txq->sta->drv_priv;
- mtxq->wcid = &sta->wcid;
- } else {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
- mtxq->wcid = &mvif->group_wcid;
- }
-
- mt76_txq_init(&dev->mt76, txq);
-}
-
static int
mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -111,14 +87,6 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return 0;
}
-static void
-mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct mt76x2_dev *dev = hw->priv;
-
- mt76_txq_remove(&dev->mt76, vif->txq);
-}
-
static int
mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
{
@@ -194,39 +162,6 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
static void
-mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast)
-{
- struct mt76x2_dev *dev = hw->priv;
- u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- dev->rxfilter &= ~(_hw); \
- dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
- } while (0)
-
- mutex_lock(&dev->mutex);
-
- dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
-
- MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
- MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
- MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
- MT_RX_FILTR_CFG_CTS |
- MT_RX_FILTR_CFG_CFEND |
- MT_RX_FILTR_CFG_CFACK |
- MT_RX_FILTR_CFG_BA |
- MT_RX_FILTR_CFG_CTRL_RSV);
- MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
-
- *total_flags = flags;
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
- mutex_unlock(&dev->mutex);
-}
-
-static void
mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
@@ -238,10 +173,13 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_BSSID)
mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
- if (changed & BSS_CHANGED_BEACON_INT)
+ if (changed & BSS_CHANGED_BEACON_INT) {
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
+ dev->beacon_int = info->beacon_int;
+ dev->tbtt_count = 0;
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED) {
tasklet_disable(&dev->pre_tbtt_tasklet);
@@ -260,66 +198,6 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_unlock(&dev->mutex);
}
-static int
-mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- int ret = 0;
- int idx = 0;
- int i;
-
- mutex_lock(&dev->mutex);
-
- idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
- }
-
- msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.hw_key_idx = -1;
- mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
- mt76x2_mac_wcid_set_drop(dev, idx, false);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76x2_txq_init(dev, sta->txq[i]);
-
- if (vif->type == NL80211_IFTYPE_AP)
- set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
-
- rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
-
-out:
- mutex_unlock(&dev->mutex);
-
- return ret;
-}
-
-static int
-mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- int idx = msta->wcid.idx;
- int i;
-
- mutex_lock(&dev->mutex);
- rcu_assign_pointer(dev->wcid[idx], NULL);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(&dev->mt76, sta->txq[i]);
- mt76x2_mac_wcid_set_drop(dev, idx, true);
- mt76_wcid_free(dev->wcid_mask, idx);
- mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
- mutex_unlock(&dev->mutex);
-
- return 0;
-}
-
void
mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
@@ -331,117 +209,6 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
mt76x2_mac_wcid_set_drop(dev, idx, ps);
}
-static int
-mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- struct mt76x2_sta *msta;
- struct mt76_wcid *wcid;
- int idx = key->keyidx;
- int ret;
-
- /* fall back to sw encryption for unsupported ciphers */
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- case WLAN_CIPHER_SUITE_CCMP:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- /*
- * The hardware does not support per-STA RX GTK, fall back
- * to software mode for these.
- */
- if ((vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) &&
- (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
- key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
-
- msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
- wcid = msta ? &msta->wcid : &mvif->group_wcid;
-
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
- wcid->sw_iv = true;
- }
- } else {
- if (idx == wcid->hw_key_idx) {
- wcid->hw_key_idx = -1;
- wcid->sw_iv = true;
- }
-
- key = NULL;
- }
- mt76_wcid_key_setup(&dev->mt76, wcid, key);
-
- if (!msta) {
- if (key || wcid->hw_key_idx == idx) {
- ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
- if (ret)
- return ret;
- }
-
- return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
- }
-
- return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
-}
-
-static int
-mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct mt76x2_dev *dev = hw->priv;
- u8 cw_min = 5, cw_max = 10, qid;
- u32 val;
-
- qid = dev->mt76.q_tx[queue].hw_idx;
-
- if (params->cw_min)
- cw_min = fls(params->cw_min);
- if (params->cw_max)
- cw_max = fls(params->cw_max);
-
- val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
- FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
- FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
- FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
- mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
-
- val = mt76_rr(dev, MT_WMM_TXOP(qid));
- val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
- val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
- mt76_wr(dev, MT_WMM_TXOP(qid), val);
-
- val = mt76_rr(dev, MT_WMM_AIFSN);
- val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
- val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_AIFSN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMIN);
- val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
- val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMIN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMAX);
- val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
- val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMAX, val);
-
- return 0;
-}
-
static void
mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
@@ -480,75 +247,6 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
return 0;
}
-static int
-mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
-{
- enum ieee80211_ampdu_mlme_action action = params->action;
- struct ieee80211_sta *sta = params->sta;
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_txq *txq = sta->txq[params->tid];
- u16 tid = params->tid;
- u16 *ssn = &params->ssn;
- struct mt76_txq *mtxq;
-
- if (!txq)
- return -EINVAL;
-
- mtxq = (struct mt76_txq *)txq->drv_priv;
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
- mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
- BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = *ssn << 4;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- }
-
- return 0;
-}
-
-static void
-mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
- struct ieee80211_tx_rate rate = {};
-
- if (!rates)
- return;
-
- rate.idx = rates->rate[0].idx;
- rate.flags = rates->rate[0].flags;
- mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
- msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
-}
-
static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class)
{
@@ -600,6 +298,21 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
return 0;
}
+static int
+mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ if (val != ~0 && val > 0xffff)
+ return -EINVAL;
+
+ mutex_lock(&dev->mutex);
+ mt76x2_mac_set_tx_protection(dev, val);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
const struct ieee80211_ops mt76x2_ops = {
.tx = mt76x2_tx,
.start = mt76x2_start,
@@ -626,5 +339,6 @@ const struct ieee80211_ops mt76x2_ops = {
.set_tim = mt76x2_set_tim,
.set_antenna = mt76x2_set_antenna,
.get_antenna = mt76x2_get_antenna,
+ .set_rts_threshold = mt76x2_set_rts_threshold,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
index dfd36d736b06..743da57760dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -23,23 +23,6 @@
#include "mt76x2_dma.h"
#include "mt76x2_eeprom.h"
-struct mt76x2_fw_header {
- __le32 ilm_len;
- __le32 dlm_len;
- __le16 build_ver;
- __le16 fw_ver;
- u8 pad[4];
- char build_time[16];
-};
-
-struct mt76x2_patch_header {
- char build_time[16];
- char platform[4];
- char hw_version[4];
- char patch_version[4];
- u8 pad[2];
-};
-
static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
index d7a7e83262ce..e40293f21417 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -146,6 +146,23 @@ struct mt76x2_tssi_comp {
u8 offset1;
} __packed __aligned(4);
+struct mt76x2_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76x2_patch_header {
+ char build_time[16];
+ char platform[4];
+ char hw_version[4];
+ char patch_version[4];
+ u8 pad[2];
+};
+
int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
u32 param);
int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
index c1c38ca3330a..84c96c0415b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -19,172 +19,6 @@
#include "mt76x2_mcu.h"
#include "mt76x2_eeprom.h"
-static void
-mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
- s8 gain;
-
- gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
- gain -= offset / 2;
- mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
-}
-
-static void
-mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
- s8 gain;
-
- gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
- gain += offset;
- mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
-}
-
-static void
-mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
-{
- s8 *gain_adj = dev->cal.rx.high_gain;
-
- mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
- mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
-
- mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
- mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
-}
-
-static u32
-mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
-{
- u32 val = 0;
-
- val |= (v1 & (BIT(6) - 1)) << 0;
- val |= (v2 & (BIT(6) - 1)) << 8;
- val |= (v3 & (BIT(6) - 1)) << 16;
- val |= (v4 & (BIT(6) - 1)) << 24;
- return val;
-}
-
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
-{
- struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
-
- rssi += cal->rssi_offset[chain];
- rssi -= cal->lna_gain;
-
- return rssi;
-}
-
-static void
-mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- r->all[i] += offset;
-}
-
-static void
-mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- if (r->all[i] > limit)
- r->all[i] = limit;
-}
-
-static int
-mt76x2_get_min_rate_power(struct mt76_rate_power *r)
-{
- int i;
- s8 ret = 0;
-
- for (i = 0; i < sizeof(r->all); i++) {
- if (!r->all[i])
- continue;
-
- if (ret)
- ret = min(ret, r->all[i]);
- else
- ret = r->all[i];
- }
-
- return ret;
-}
-
-
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
-{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
- struct mt76x2_tx_power_info txp;
- int txp_0, txp_1, delta = 0;
- struct mt76_rate_power t = {};
- int base_power, gain;
-
- mt76x2_get_power_info(dev, &txp, chan);
-
- if (width == NL80211_CHAN_WIDTH_40)
- delta = txp.delta_bw40;
- else if (width == NL80211_CHAN_WIDTH_80)
- delta = txp.delta_bw80;
-
- mt76x2_get_rate_power(dev, &t, chan);
- mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
- mt76x2_limit_rate_power(&t, dev->txpower_conf);
- dev->txpower_cur = mt76x2_get_max_rate_power(&t);
-
- base_power = mt76x2_get_min_rate_power(&t);
- delta += base_power - txp.chain[0].target_power;
- txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
- txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
-
- gain = min(txp_0, txp_1);
- if (gain < 0) {
- base_power -= gain;
- txp_0 -= gain;
- txp_1 -= gain;
- } else if (gain > 0x2f) {
- base_power -= gain - 0x2f;
- txp_0 = 0x2f;
- txp_1 = 0x2f;
- }
-
- mt76x2_add_rate_power_offset(&t, -base_power);
- dev->target_power = txp.chain[0].target_power;
- dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
- dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
- dev->rate_power = t;
-
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
-
- mt76_wr(dev, MT_TX_PWR_CFG_0,
- mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_1,
- mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_2,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
- mt76_wr(dev, MT_TX_PWR_CFG_3,
- mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_4,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
- mt76_wr(dev, MT_TX_PWR_CFG_7,
- mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
- mt76_wr(dev, MT_TX_PWR_CFG_8,
- mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
- mt76_wr(dev, MT_TX_PWR_CFG_9,
- mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
-}
-
-static bool
-mt76x2_channel_silent(struct mt76x2_dev *dev)
-{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-
- return ((chan->flags & IEEE80211_CHAN_RADAR) &&
- chan->dfs_state != NL80211_DFS_AVAILABLE);
-}
-
static bool
mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
{
@@ -243,140 +77,6 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
dev->cal.channel_cal_done = true;
}
-static void
-mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
-{
- u32 pa_mode[2];
- u32 pa_mode_adj;
-
- if (band == NL80211_BAND_2GHZ) {
- pa_mode[0] = 0x010055ff;
- pa_mode[1] = 0x00550055;
-
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
- } else {
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
- }
- } else {
- pa_mode[0] = 0x0000ffff;
- pa_mode[1] = 0x00ff00ff;
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
- } else {
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
- }
-
- if (mt76x2_ext_pa_enabled(dev, band))
- pa_mode_adj = 0x04000000;
- else
- pa_mode_adj = 0;
-
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
- }
-
- mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
- mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
- mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
- mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- u32 val;
-
- if (band == NL80211_BAND_2GHZ)
- val = 0x3c3c023c;
- else
- val = 0x363c023c;
-
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
- } else {
- if (band == NL80211_BAND_2GHZ) {
- u32 val = 0x0f3c3c3c;
-
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
- } else {
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
- }
- }
-}
-
-static void
-mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
-{
- u32 cfg0, cfg1;
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- cfg0 = bw ? 0x000b0c01 : 0x00101101;
- cfg1 = 0x00011414;
- } else {
- cfg0 = bw ? 0x000b0b01 : 0x00101001;
- cfg1 = 0x00021414;
- }
- mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
- mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
-
- mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
-}
-
-static void
-mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
-{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-
-static void
-mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
-{
- switch (band) {
- case NL80211_BAND_2GHZ:
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- case NL80211_BAND_5GHZ:
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- }
-
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-
void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
{
u32 val;
@@ -485,12 +185,14 @@ static void
mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
{
u32 false_cca;
- u8 limit = dev->cal.low_gain > 1 ? 4 : 16;
+ u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
+ dev->cal.false_cca = false_cca;
if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
dev->cal.agc_gain_adjust += 2;
- else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0)
+ else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
+ (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
dev->cal.agc_gain_adjust -= 2;
else
return;
@@ -501,57 +203,65 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
static void
mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
{
- u32 val = mt76_rr(dev, MT_BBP(AGC, 20));
- int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val);
- int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val);
u8 *gain = dev->cal.agc_gain_init;
- u8 gain_delta;
+ u8 low_gain_delta, gain_delta;
+ bool gain_change;
int low_gain;
+ u32 val;
- dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 +
- (rssi0 << 8) / 16;
- dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 +
- (rssi1 << 8) / 16;
- dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
- dev->cal.avg_rssi[1]) / 512;
+ dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
- if (dev->cal.low_gain == low_gain) {
+ gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
+ dev->cal.low_gain = low_gain;
+
+ if (!gain_change) {
mt76x2_phy_adjust_vga_gain(dev);
return;
}
- dev->cal.low_gain = low_gain;
-
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
- else
+ val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
+ if (low_gain == 2)
+ val |= 0x3;
+ else
+ val |= 0x5;
+ mt76_wr(dev, MT_BBP(AGC, 26), val);
+ } else {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+ }
- if (low_gain) {
- mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+ if (mt76x2_has_ext_lna(dev))
+ low_gain_delta = 10;
+ else
+ low_gain_delta = 14;
+
+ if (low_gain == 2) {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
- if (mt76x2_has_ext_lna(dev))
- gain_delta = 10;
- else
- gain_delta = 14;
+ gain_delta = low_gain_delta;
+ dev->cal.agc_gain_adjust = 0;
} else {
- mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
else
mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
gain_delta = 0;
+ dev->cal.agc_gain_adjust = low_gain_delta;
}
dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
- dev->cal.agc_gain_adjust = 0;
mt76x2_phy_set_gain_val(dev);
+
+ /* clear false CCA counters */
+ mt76_rr(dev, MT_RX_STAT_1);
}
int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
new file mode 100644
index 000000000000..9fd6ab4cbb94
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain -= offset / 2;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain += offset;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+ s8 *gain_adj = dev->cal.rx.high_gain;
+
+ mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+ mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+ mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+ mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+ enum nl80211_band band)
+{
+ u32 pa_mode[2];
+ u32 pa_mode_adj;
+
+ if (band == NL80211_BAND_2GHZ) {
+ pa_mode[0] = 0x010055ff;
+ pa_mode[1] = 0x00550055;
+
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+ } else {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+ }
+ } else {
+ pa_mode[0] = 0x0000ffff;
+ pa_mode[1] = 0x00ff00ff;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+ } else {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+ }
+
+ if (mt76x2_ext_pa_enabled(dev, band))
+ pa_mode_adj = 0x04000000;
+ else
+ pa_mode_adj = 0;
+
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+ }
+
+ mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ u32 val;
+
+ if (band == NL80211_BAND_2GHZ)
+ val = 0x3c3c023c;
+ else
+ val = 0x363c023c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+ } else {
+ if (band == NL80211_BAND_2GHZ) {
+ u32 val = 0x0f3c3c3c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
+
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ if (r->all[i] > limit)
+ r->all[i] = limit;
+}
+
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ u32 val = 0;
+
+ val |= (v1 & (BIT(6) - 1)) << 0;
+ val |= (v2 & (BIT(6) - 1)) << 8;
+ val |= (v3 & (BIT(6) - 1)) << 16;
+ val |= (v4 & (BIT(6) - 1)) << 24;
+ return val;
+}
+
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ r->all[i] += offset;
+}
+
+static int
+mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+{
+ int i;
+ s8 ret = 0;
+
+ for (i = 0; i < sizeof(r->all); i++) {
+ if (!r->all[i])
+ continue;
+
+ if (ret)
+ ret = min(ret, r->all[i]);
+ else
+ ret = r->all[i];
+ }
+
+ return ret;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+ enum nl80211_chan_width width = dev->mt76.chandef.width;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ int txp_0, txp_1, delta = 0;
+ struct mt76_rate_power t = {};
+ int base_power, gain;
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (width == NL80211_CHAN_WIDTH_40)
+ delta = txp.delta_bw40;
+ else if (width == NL80211_CHAN_WIDTH_80)
+ delta = txp.delta_bw80;
+
+ mt76x2_get_rate_power(dev, &t, chan);
+ mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
+ mt76x2_limit_rate_power(&t, dev->txpower_conf);
+ dev->txpower_cur = mt76x2_get_max_rate_power(&t);
+
+ base_power = mt76x2_get_min_rate_power(&t);
+ delta += base_power - txp.chain[0].target_power;
+ txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
+ txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
+
+ gain = min(txp_0, txp_1);
+ if (gain < 0) {
+ base_power -= gain;
+ txp_0 -= gain;
+ txp_1 -= gain;
+ } else if (gain > 0x2f) {
+ base_power -= gain - 0x2f;
+ txp_0 = 0x2f;
+ txp_1 = 0x2f;
+ }
+
+ mt76x2_add_rate_power_offset(&t, -base_power);
+ dev->target_power = txp.chain[0].target_power;
+ dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
+ dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
+ dev->rate_power = t;
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
+
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+ enum nl80211_band band, u8 bw)
+{
+ u32 cfg0, cfg1;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ cfg0 = bw ? 0x000b0c01 : 0x00101101;
+ cfg1 = 0x00011414;
+ } else {
+ cfg0 = bw ? 0x000b0b01 : 0x00101001;
+ cfg1 = 0x00021414;
+ }
+ mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+ mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
+
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
+
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
+
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
+{
+ struct mt76x2_sta *sta;
+ struct mt76_wcid *wcid;
+ int i, j, min_rssi = 0;
+ s8 cur_rssi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ unsigned long mask = dev->wcid_mask[i];
+
+ if (!mask)
+ continue;
+
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ wcid = rcu_dereference(dev->wcid[j]);
+ if (!wcid)
+ continue;
+
+ sta = container_of(wcid, struct mt76x2_sta, wcid);
+ spin_lock(&dev->mt76.rx_lock);
+ if (sta->inactive_count++ < 5)
+ cur_rssi = ewma_signal_read(&sta->rssi);
+ else
+ cur_rssi = 0;
+ spin_unlock(&dev->mt76.rx_lock);
+
+ if (cur_rssi < min_rssi)
+ min_rssi = cur_rssi;
+ }
+ }
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ if (!min_rssi)
+ return -75;
+
+ return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
index b9c334d9e5b8..1551ea453180 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -75,6 +75,21 @@
#define MT_XO_CTRL7 0x011c
+#define MT_USB_U3DMA_CFG 0x9018
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKE_UP_EN BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PAD BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+
#define MT_WLAN_MTC_CTRL 0x10148
#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
@@ -150,6 +165,9 @@
#define MT_TX_HW_QUEUE_MCU 8
#define MT_TX_HW_QUEUE_MGMT 9
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
#define MT_PBF_SYS_CTRL 0x0400
#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
@@ -202,6 +220,11 @@
#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+#define MT_FCE_SKIP_FS 0x0a6c
+
#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
#define MT_MAC_CSR0 0x1000
@@ -214,6 +237,7 @@
#define MT_MAC_ADDR_DW0 0x1008
#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
#define MT_MAC_BSSID_DW0 0x1010
#define MT_MAC_BSSID_DW1 0x1014
@@ -351,6 +375,7 @@
#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8)
#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
#define MT_VHT_HT_FBK_CFG1 0x1358
#define MT_PROT_CFG_RATE GENMASK(15, 0)
@@ -425,6 +450,7 @@
#define MT_RX_FILTR_CFG_BAR BIT(15)
#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+#define MT_AUTO_RSP_CFG 0x1404
#define MT_LEGACY_BASIC_RATE 0x1408
#define MT_HT_BASIC_RATE 0x140c
@@ -460,6 +486,10 @@
#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0)
#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
+#define MT_TX_STA_0 0x170c
+#define MT_TX_STA_1 0x1710
+#define MT_TX_STA_2 0x1714
+
#define MT_TX_STAT_FIFO 0x1718
#define MT_TX_STAT_FIFO_VALID BIT(0)
#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
index e46eafc4c436..4c907882e8b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -23,129 +23,6 @@ struct beacon_bc_data {
struct sk_buff *tail[8];
};
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76x2_dev *dev = hw->priv;
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_wcid *wcid = &dev->global_wcid;
-
- if (control->sta) {
- struct mt76x2_sta *msta;
-
- msta = (struct mt76x2_sta *) control->sta->drv_priv;
- wcid = &msta->wcid;
- /* sw encrypted frames */
- if (!info->control.hw_key && wcid->hw_key_idx != -1)
- control->sta = NULL;
- }
-
- if (vif && !control->sta) {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *) vif->drv_priv;
- wcid = &mvif->group_wcid;
- }
-
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
-}
-
-void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ieee80211_free_txskb(mt76_hw(dev), skb);
- } else {
- ieee80211_tx_info_clear_status(info);
- info->status.rates[0].idx = -1;
- info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status(mt76_hw(dev), skb);
- }
-}
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate)
-{
- s8 max_txpwr;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- u8 mcs = ieee80211_rate_get_vht_mcs(rate);
-
- if (mcs == 8 || mcs == 9) {
- max_txpwr = dev->rate_power.vht[8];
- } else {
- u8 nss, idx;
-
- nss = ieee80211_rate_get_vht_nss(rate);
- idx = ((nss - 1) << 3) + mcs;
- max_txpwr = dev->rate_power.ht[idx & 0xf];
- }
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
- } else {
- enum nl80211_band band = dev->mt76.chandef.chan->band;
-
- if (band == NL80211_BAND_2GHZ) {
- const struct ieee80211_rate *r;
- struct wiphy *wiphy = mt76_hw(dev)->wiphy;
- struct mt76_rate_power *rp = &dev->rate_power;
-
- r = &wiphy->bands[band]->bitrates[rate->idx];
- if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
- max_txpwr = rp->cck[r->hw_value & 0x3];
- else
- max_txpwr = rp->ofdm[r->hw_value & 0x7];
- } else {
- max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
- }
- }
-
- return max_txpwr;
-}
-
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
-{
- txpwr = min_t(s8, txpwr, dev->txpower_conf);
- txpwr -= (dev->target_power + dev->target_power_delta[0]);
- txpwr = min_t(s8, txpwr, max_txpwr_adj);
-
- if (!dev->enable_tpc)
- return 0;
- else if (txpwr >= 0)
- return min_t(s8, txpwr, 7);
- else
- return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
-}
-
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
-{
- s8 txpwr_adj;
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
- dev->rate_power.ofdm[4]);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
-}
-
-static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
-{
- int len = ieee80211_get_hdrlen_from_skb(skb);
-
- if (len % 4 == 0)
- return 0;
-
- skb_push(skb, 2);
- memmove(skb->data, skb->data + 2, len);
-
- skb->data[len] = 0;
- skb->data[len + 1] = 0;
- return 2;
-}
-
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
@@ -159,7 +36,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
ret = mt76x2_insert_hdr_pad(skb);
if (ret < 0)
@@ -218,6 +95,37 @@ mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
data->tail[mvif->idx] = skb;
}
+static void
+mt76x2_resync_beacon_timer(struct mt76x2_dev *dev)
+{
+ u32 timer_val = dev->beacon_int << 4;
+
+ dev->tbtt_count++;
+
+ /*
+ * Beacon timer drifts by 1us every tick; the timer is configured
+ * in 1/16 TU (64us) units.
+ */
+ if (dev->tbtt_count < 62)
+ return;
+
+ if (dev->tbtt_count >= 64) {
+ dev->tbtt_count = 0;
+ return;
+ }
+
+ /*
+ * The updated beacon interval takes effect after two TBTTs, because
+ * at this point the original interval has already been loaded into
+ * the next TBTT_TIMER value.
+ */
+ if (dev->tbtt_count == 62)
+ timer_val -= 1;
+
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL, timer_val);
+}
+
void mt76x2_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
@@ -226,6 +134,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
struct sk_buff *skb;
int i, nframes;
+ mt76x2_resync_beacon_timer(dev);
+
data.dev = dev;
__skb_queue_head_init(&data.q);
@@ -256,7 +166,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
+ mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
+ NULL);
}
spin_unlock_bh(&q->lock);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
new file mode 100644
index 000000000000..36afb166fa3f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x2_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->global_wcid;
+
+ if (control->sta) {
+ struct mt76x2_sta *msta;
+
+ msta = (struct mt76x2_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ /* sw encrypted frames */
+ if (!info->control.hw_key && wcid->hw_key_idx != -1)
+ control->sta = NULL;
+ }
+
+ if (vif && !control->sta) {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *)vif->drv_priv;
+ wcid = &mvif->group_wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx);
+
+int mt76x2_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return 0;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad);
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+ if (mcs == 8 || mcs == 9) {
+ max_txpwr = dev->rate_power.vht[8];
+ } else {
+ u8 nss, idx;
+
+ nss = ieee80211_rate_get_vht_nss(rate);
+ idx = ((nss - 1) << 3) + mcs;
+ max_txpwr = dev->rate_power.ht[idx & 0xf];
+ }
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+ } else {
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+ if (band == NL80211_BAND_2GHZ) {
+ const struct ieee80211_rate *r;
+ struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+ struct mt76_rate_power *rp = &dev->rate_power;
+
+ r = &wiphy->bands[band]->bitrates[rate->idx];
+ if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+ max_txpwr = rp->cck[r->hw_value & 0x3];
+ else
+ max_txpwr = rp->ofdm[r->hw_value & 0x7];
+ } else {
+ max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+ }
+ }
+
+ return max_txpwr;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj);
+
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
+ txpwr -= (dev->target_power + dev->target_power_delta[0]);
+ txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+ if (!dev->enable_tpc)
+ return 0;
+ else if (txpwr >= 0)
+ return min_t(s8, txpwr, 7);
+ else
+ return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj);
+
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
+{
+ s8 txpwr_adj;
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
+ dev->rate_power.ofdm[4]);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto);
+
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ieee80211_free_txskb(mt76_hw(dev), skb);
+ } else {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(mt76_hw(dev), skb);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_complete);
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
new file mode 100644
index 000000000000..1428cfdee579
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt76x2u.h"
+
+static const struct usb_device_id mt76x2u_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
+ { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
+ { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
+ { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
+ { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
+ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
+ { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
+ { },
+};
+
+static int mt76x2u_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x2_dev *dev;
+ int err;
+
+ dev = mt76x2u_alloc_device(&intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ err = mt76u_init(&dev->mt76, intf);
+ if (err < 0)
+ goto err;
+
+ dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+ err = mt76x2u_register_device(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+
+err:
+ ieee80211_free_hw(mt76_hw(dev));
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+
+ return err;
+}
+
+static void mt76x2u_disconnect(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ ieee80211_unregister_hw(hw);
+ mt76x2u_cleanup(dev);
+
+ ieee80211_free_hw(hw);
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+}
+
+static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
+ pm_message_t state)
+{
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
+
+ mt76u_stop_queues(&dev->mt76);
+ mt76x2u_stop_hw(dev);
+ usb_kill_urb(usb->mcu.res.urb);
+
+ return 0;
+}
+
+static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
+{
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
+ int err;
+
+ reinit_completion(&usb->mcu.cmpl);
+ err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ &usb->mcu.res, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (err < 0)
+ return err;
+
+ err = mt76u_submit_rx_buffers(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ tasklet_enable(&usb->rx_tasklet);
+ tasklet_enable(&usb->tx_tasklet);
+
+ return mt76x2u_init_hardware(dev);
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
+MODULE_FIRMWARE(MT7662U_FIRMWARE);
+MODULE_FIRMWARE(MT7662U_ROM_PATCH);
+
+static struct usb_driver mt76x2u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x2u_device_table,
+ .probe = mt76x2u_probe,
+ .disconnect = mt76x2u_disconnect,
+#ifdef CONFIG_PM
+ .suspend = mt76x2u_suspend,
+ .resume = mt76x2u_resume,
+ .reset_resume = mt76x2u_resume,
+#endif /* CONFIG_PM */
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x2u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
new file mode 100644
index 000000000000..008092f0cd8a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2U_H
+#define __MT76x2U_H
+
+#include <linux/device.h>
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_mcu.h"
+
+#define MT7612U_EEPROM_SIZE 512
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */
+
+extern const struct ieee80211_ops mt76x2u_ops;
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev);
+int mt76x2u_register_device(struct mt76x2_dev *dev);
+int mt76x2u_init_hardware(struct mt76x2_dev *dev);
+void mt76x2u_cleanup(struct mt76x2_dev *dev);
+void mt76x2u_stop_hw(struct mt76x2_dev *dev);
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr);
+int mt76x2u_mac_reset(struct mt76x2_dev *dev);
+void mt76x2u_mac_resume(struct mt76x2_dev *dev);
+int mt76x2u_mac_start(struct mt76x2_dev *dev);
+int mt76x2u_mac_stop(struct mt76x2_dev *dev);
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x2u_phy_calibrate(struct work_struct *work);
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev);
+
+void mt76x2u_mcu_complete_urb(struct urb *urb);
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan);
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 val);
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data);
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force);
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+ bool ext, int rssi, u32 false_cca);
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
+ u8 temp_level, u8 channel);
+int mt76x2u_mcu_init(struct mt76x2_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev);
+
+int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
+void mt76x2u_stop_queues(struct mt76x2_dev *dev);
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update);
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
+ u32 flags);
+
+#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
new file mode 100644
index 000000000000..1ca5dd05b265
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "dma.h"
+
+static void mt76x2u_remove_dma_hdr(struct sk_buff *skb)
+{
+ int hdr_len;
+
+ skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN);
+ hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ if (hdr_len % 4) {
+ memmove(skb->data + 2, skb->data, hdr_len);
+ skb_pull(skb, 2);
+ }
+}
+
+static int
+mt76x2u_check_skb_rooms(struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN;
+ if (hdr_len % 4)
+ need_head += 2;
+ return skb_cow(skb, need_head);
+}
+
+static int
+mt76x2u_set_txinfo(struct sk_buff *skb,
+ struct mt76_wcid *wcid, u8 ep)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mt76x2_qsel qsel;
+ u32 flags;
+
+ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ ep == MT_EP_OUT_HCCA)
+ qsel = MT_QSEL_MGMT;
+ else
+ qsel = MT_QSEL_EDCA;
+
+ flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ flags |= MT_TXD_INFO_WIV;
+
+ return mt76u_skb_dma_info(skb, WLAN_PORT, flags);
+}
+
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x2_tx_status stat;
+
+ if (!mt76x2_mac_load_tx_status(dev, &stat))
+ return false;
+
+ mt76x2_send_tx_status(dev, &stat, update);
+
+ return true;
+}
+
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x2_txwi *txwi;
+ int err, len = skb->len;
+
+ err = mt76x2u_check_skb_rooms(skb);
+ if (err < 0)
+ return -ENOMEM;
+
+ mt76x2_insert_hdr_pad(skb);
+
+ txwi = skb_push(skb, sizeof(struct mt76x2_txwi));
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+
+ return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
+}
+
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+ mt76x2u_remove_dma_hdr(e->skb);
+ mt76x2_tx_complete(dev, e->skb);
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
new file mode 100644
index 000000000000..9b81e7641c06
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_init_dma(struct mt76x2_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+
+ /* disable AGGR_BULK_RX in order to receive one
+ * frame in each rx urb and avoid copies
+ */
+ val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+}
+
+static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
+{
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
+ udelay(1);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);
+
+ mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
+ udelay(1);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
+ usleep_range(150, 200);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
+ usleep_range(50, 100);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
+}
+
+static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
+{
+ int shift = unit ? 8 : 0;
+ u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
+
+ /* Enable RF BG */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
+ usleep_range(10, 20);
+
+ /* Enable RFDIG LDO/AFE/ABB/ADDA */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
+ usleep_range(10, 20);
+
+ /* Switch RFDIG power to internal LDO */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
+ usleep_range(10, 20);
+
+ mt76x2u_power_on_rf_patch(dev);
+
+ mt76_set(dev, 0x530, 0xf);
+}
+
+static void mt76x2u_power_on(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ /* Turn on WL MTCMOS */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
+ MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+ val = MT_WLAN_MTC_CTRL_STATE_UP |
+ MT_WLAN_MTC_CTRL_PWR_ACK |
+ MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+ mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
+ usleep_range(10, 20);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ usleep_range(10, 20);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);
+
+ /* Clear the AD/DA power-down bit */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));
+
+ /* WLAN function enable */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));
+
+ /* Release BBP software reset */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));
+
+ mt76x2u_power_on_rf(dev, 0);
+ mt76x2u_power_on_rf(dev, 1);
+}
+
+static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
+{
+ u32 val, i;
+
+ dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
+ MT7612U_EEPROM_SIZE,
+ GFP_KERNEL);
+ dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
+ if (!dev->mt76.eeprom.data)
+ return -ENOMEM;
+
+ for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
+ val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
+ put_unaligned_le32(val, dev->mt76.eeprom.data + i);
+ }
+
+ mt76x2_eeprom_parse_hw_cap(dev);
+ return 0;
+}
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .tx_prepare_skb = mt76x2u_tx_prepare_skb,
+ .tx_complete_skb = mt76x2u_tx_complete_skb,
+ .tx_status_data = mt76x2u_tx_status_data,
+ .rx_skb = mt76x2_queue_rx_skb,
+ };
+ struct mt76x2_dev *dev;
+ struct mt76_dev *mdev;
+
+ mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
+ if (!mdev)
+ return NULL;
+
+ dev = container_of(mdev, struct mt76x2_dev, mt76);
+ mdev->dev = pdev;
+ mdev->drv = &drv_ops;
+
+ mutex_init(&dev->mutex);
+
+ return dev;
+}
+
+static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
+ mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
+ mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
+ mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
+}
+
+int mt76x2u_init_hardware(struct mt76x2_dev *dev)
+{
+ static const u16 beacon_offsets[] = {
+ /* 512 bytes per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ const struct mt76_wcid_addr addr = {
+ .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .ba_mask = 0,
+ };
+ int i, err;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt76x2_reset_wlan(dev, true);
+ mt76x2u_power_on(dev);
+
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ err = mt76x2u_mcu_fw_init(dev);
+ if (err < 0)
+ return err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
+ return -EIO;
+
+ /* wait for asic ready after fw load. */
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
+ mt76_wr(dev, MT_TSO_CTRL, 0);
+
+ mt76x2u_init_dma(dev);
+
+ err = mt76x2u_mcu_init(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76x2u_mac_reset(dev);
+ if (err < 0)
+ return err;
+
+ mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+ mt76x2u_init_beacon_offsets(dev);
+
+ if (!mt76x2_wait_for_bbp(dev))
+ return -ETIMEDOUT;
+
+ /* reset wcid table */
+ for (i = 0; i < 254; i++)
+ mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
+ sizeof(struct mt76_wcid_addr));
+
+ /* reset shared key table and pairwise key table */
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
+ for (i = 0; i < 256; i++)
+ mt76_wr(dev, MT_WCID_ATTR(i), 1);
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
+
+ err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+ if (err < 0)
+ return err;
+
+ mt76x2u_phy_set_rxpath(dev);
+ mt76x2u_phy_set_txdac(dev);
+
+ return mt76x2u_mac_stop(dev);
+}
+
+int mt76x2u_register_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int err;
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
+ mt76x2_init_device(dev);
+
+ err = mt76x2u_init_eeprom(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76u_mcu_init_rx(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto fail;
+
+ err = mt76x2u_init_hardware(dev);
+ if (err < 0)
+ goto fail;
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ err = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+ ARRAY_SIZE(mt76x2_rates));
+ if (err)
+ goto fail;
+
+ /* check hw sg support in order to enable AMSDU */
+ if (mt76u_check_sg(&dev->mt76))
+ hw->max_tx_fragments = MT_SG_MAX_SIZE;
+ else
+ hw->max_tx_fragments = 1;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ mt76x2_init_debugfs(dev);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+ return 0;
+
+fail:
+ mt76x2u_cleanup(dev);
+ return err;
+}
+
+void mt76x2u_stop_hw(struct mt76x2_dev *dev)
+{
+ mt76u_stop_stat_wk(&dev->mt76);
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x2u_mac_stop(dev);
+}
+
+void mt76x2u_cleanup(struct mt76x2_dev *dev)
+{
+ mt76x2u_mcu_set_radio_state(dev, false);
+ mt76x2u_stop_hw(dev);
+ mt76u_queues_deinit(&dev->mt76);
+ mt76x2u_mcu_deinit(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
new file mode 100644
index 000000000000..eab7ab297aa6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
+{
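+ /* the TX/RX statistic registers are clear-on-read, so reading
+  * them is enough to reset the hardware counters
+  */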
+ mt76_rr(dev, MT_RX_STAT_0);
+ mt76_rr(dev, MT_RX_STAT_1);
+ mt76_rr(dev, MT_RX_STAT_2);
+ mt76_rr(dev, MT_TX_STA_0);
+ mt76_rr(dev, MT_TX_STA_1);
+ mt76_rr(dev, MT_TX_STA_2);
+}
+
+static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
+{
+ s8 offset = 0;
+ u16 eep_val;
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
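+ /* low byte: crystal trim offset in sign-magnitude form
+  * (bit 7 is the sign, 0xff means no value programmed)
+  */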
+ offset = eep_val & 0x7f;
+ if ((eep_val & 0xff) == 0xff)
+ offset = 0;
+ else if (eep_val & 0x80)
+ offset = 0 - offset;
+
+ eep_val >>= 8;
+ if (eep_val == 0x00 || eep_val == 0xff) {
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val &= 0xff;
+
+ if (eep_val == 0x00 || eep_val == 0xff)
+ eep_val = 0x14;
+ }
+
+ eep_val &= 0x7f;
+ mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
+ MT_XO_CTRL5_C2_VAL, eep_val + offset);
+ mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);
+
+ mt76_wr(dev, 0x504, 0x06000000);
+ mt76_wr(dev, 0x50c, 0x08800000);
+ mdelay(5);
+ mt76_wr(dev, 0x504, 0x0);
+
+ /* decrease SIFS from 16us to 13us */
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
+ MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);
+
+ /* init fce */
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+ case 0:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+ break;
+ case 1:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+ break;
+ default:
+ break;
+ }
+}
+
+int mt76x2u_mac_reset(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
+
+ /* init pbf regs */
+ mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+ mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+
+ mt76_write_mac_initvals(dev);
+
+ mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
+ mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
+ mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
+ mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
+
+ mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
+ mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
+ mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ if (is_mt7612(dev))
+ mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+ mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+ mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+ mt76x2u_mac_fixup_xtal(dev);
+
+ return 0;
+}
+
+int mt76x2u_mac_start(struct mt76x2_dev *dev)
+{
+ mt76x2u_mac_reset_counters(dev);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ wait_for_wpdma(dev);
+ usleep_range(50, 100);
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ return 0;
+}
+
+int mt76x2u_mac_stop(struct mt76x2_dev *dev)
+{
+ int i, count = 0, val;
+ bool stopped = false;
+ u32 rts_cfg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -EIO;
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+
+ /* wait for tx dma to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ /* page count on TxQ */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
+ break;
+ usleep_range(10, 20);
+ }
+
+ /* disable tx-rx */
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 1000; i++) {
+ if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
+ !mt76_rr(dev, MT_BBP(IBI, 12))) {
+ stopped = true;
+ break;
+ }
+ usleep_range(10, 20);
+ }
+
+ if (!stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ /* page count on RxQ */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
+ !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
+ ++count > 10)
+ break;
+ msleep(50);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
+ dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");
+
+ /* wait for rx dma to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+
+ return 0;
+}
+
+void mt76x2u_mac_resume(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
+}
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr)
+{
+ ether_addr_copy(dev->mt76.macaddr, addr);
+
+ if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+ eth_random_addr(dev->mt76.macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->mt76.macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
new file mode 100644
index 000000000000..7367ba111119
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+
+static int mt76x2u_start(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x2u_mac_start(dev);
+ if (ret)
+ goto out;
+
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt76x2u_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76x2u_stop_hw(dev);
+ mutex_unlock(&dev->mutex);
+}
+
+static int mt76x2u_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv;
+ unsigned int idx = 0;
+
+ if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
+ mt76x2u_mac_setaddr(dev, vif->addr);
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = MT_VIF_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+ mt76x2_txq_init(dev, vif->txq);
+
+ return 0;
+}
+
+static int
+mt76x2u_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int err;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_set_channel(&dev->mt76);
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+ mt76x2_mac_stop(dev, false);
+
+ err = mt76x2u_phy_set_channel(dev, chandef);
+
+ mt76x2u_mac_resume(dev);
+
+ clear_bit(MT76_RESET, &dev->mt76.state);
+ mt76_txq_schedule_all(&dev->mt76);
+
+ return err;
+}
+
+static void
+mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ mt76x2u_phy_channel_calibrate(dev);
+ mt76x2_apply_gain_adj(dev);
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt76_wr(dev, MT_MAC_BSSID_DW0,
+ get_unaligned_le32(info->bssid));
+ mt76_wr(dev, MT_MAC_BSSID_DW1,
+ get_unaligned_le16(info->bssid + 4));
+ }
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int err = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ err = mt76x2u_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ dev->txpower_conf = hw->conf.power_level * 2;
+
+ /* convert to per-chain power for 2x2 devices */
+ dev->txpower_conf -= 6;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ mt76x2_phy_set_txpower(dev);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return err;
+}
+
+static void
+mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+const struct ieee80211_ops mt76x2u_ops = {
+ .tx = mt76x2_tx,
+ .start = mt76x2u_start,
+ .stop = mt76x2u_stop,
+ .add_interface = mt76x2u_add_interface,
+ .remove_interface = mt76x2_remove_interface,
+ .sta_add = mt76x2_sta_add,
+ .sta_remove = mt76x2_sta_remove,
+ .set_key = mt76x2_set_key,
+ .ampdu_action = mt76x2_ampdu_action,
+ .config = mt76x2u_config,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .bss_info_changed = mt76x2u_bss_info_changed,
+ .configure_filter = mt76x2_configure_filter,
+ .conf_tx = mt76x2_conf_tx,
+ .sw_scan_start = mt76x2u_sw_scan,
+ .sw_scan_complete = mt76x2u_sw_scan_complete,
+ .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
new file mode 100644
index 000000000000..22c16d638baa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+#define MT_CMD_HDR_LEN 4
+#define MT_INBAND_PACKET_MAX_LEN 192
+#define MT_MCU_MEMMAP_WLAN 0x410000
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x3900
+#define MCU_ROM_PATCH_MAX_PAYLOAD 2048
+
+#define MT76U_MCU_ILM_OFFSET 0x80000
+#define MT76U_MCU_DLM_OFFSET 0x110000
+#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
+
+static int
+mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+ u32 val)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
+ func != Q_SELECT);
+}
+
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
+{
+ struct {
+ __le32 mode;
+ __le32 level;
+ } __packed __aligned(4) msg = {
+ .mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
+ .level = cpu_to_le32(0),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
+ false);
+}
+
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+ u8 channel)
+{
+ struct {
+ u8 cr_mode;
+ u8 temp;
+ u8 ch;
+ u8 _pad0;
+ __le32 cfg;
+ } __packed __aligned(4) msg = {
+ .cr_mode = type,
+ .temp = temp_level,
+ .ch = channel,
+ };
+ struct sk_buff *skb;
+ u32 val;
+
+ val = BIT(31);
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ msg.cfg = cpu_to_le32(val);
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
+}
+
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan)
+{
+ struct {
+ u8 idx;
+ u8 scan;
+ u8 bw;
+ u8 _pad0;
+
+ __le16 chainmask;
+ u8 ext_chan;
+ u8 _pad1;
+
+ } __packed __aligned(4) msg = {
+ .idx = channel,
+ .scan = scan,
+ .bw = bw,
+ .chainmask = cpu_to_le16(dev->chainmask),
+ };
+ struct sk_buff *skb;
+
+ /* first set the channel without the extension channel info */
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+
+ usleep_range(5000, 10000);
+
+ msg.ext_chan = 0xe0 + bw_index;
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+}
+
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 val)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(type),
+ .value = cpu_to_le32(val),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force)
+{
+ struct {
+ __le32 channel;
+ __le32 gain_val;
+ } __packed __aligned(4) msg = {
+ .channel = cpu_to_le32(channel),
+ .gain_val = cpu_to_le32(gain),
+ };
+ struct sk_buff *skb;
+
+ if (force)
+ msg.channel |= cpu_to_le32(BIT(31));
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
+}
+
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+ bool ext, int rssi, u32 false_cca)
+{
+ struct {
+ __le32 channel;
+ __le32 rssi_val;
+ __le32 false_cca_val;
+ } __packed __aligned(4) msg = {
+ .rssi_val = cpu_to_le32(rssi),
+ .false_cca_val = cpu_to_le32(false_cca),
+ };
+ struct sk_buff *skb;
+ u32 val = channel;
+
+ if (ap)
+ val |= BIT(31);
+ if (ext)
+ val |= BIT(30);
+ msg.channel = cpu_to_le32(val);
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
+}
+
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data)
+{
+ struct {
+ __le32 id;
+ struct mt76x2_tssi_comp data;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+ .data = *tssi_data,
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
+static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
+{
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x12, 0, NULL, 0);
+}
+
+static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ const u8 data[] = {
+ 0x6f, 0xfc, 0x08, 0x01,
+ 0x20, 0x04, 0x00, 0x00,
+ 0x00, 0x09, 0x00,
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ u8 data[] = {
+ 0x6f, 0xfc, 0x05, 0x01,
+ 0x07, 0x01, 0x00, 0x04
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
+{
+ bool rom_protect = !is_mt7612(dev);
+ struct mt76x2_patch_header *hdr;
+ u32 val, patch_mask, patch_reg;
+ const struct firmware *fw;
+ int err;
+
+ if (rom_protect &&
+ !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "could not get hardware semaphore for ROM PATCH\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+ patch_mask = BIT(0);
+ patch_reg = MT_MCU_CLOCK_CTL;
+ } else {
+ patch_mask = BIT(1);
+ patch_reg = MT_MCU_COM_REG0;
+ }
+
+ if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+ dev_info(dev->mt76.dev, "ROM patch already applied\n");
+ return 0;
+ }
+
+ err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "failed to load firmware\n");
+ err = -EIO;
+ goto out;
+ }
+
+ hdr = (struct mt76x2_patch_header *)fw->data;
+ dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+
+ /* vendor reset */
+ mt76u_mcu_fw_reset(&dev->mt76);
+ usleep_range(5000, 10000);
+
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ fw->size - sizeof(*hdr),
+ MCU_ROM_PATCH_MAX_PAYLOAD,
+ MT76U_MCU_ROM_PATCH_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_enable_patch(dev);
+ mt76x2u_mcu_reset_wmt(dev);
+ mdelay(20);
+
+ if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
+ dev_err(dev->mt76.dev, "failed to load ROM patch\n");
+ err = -ETIMEDOUT;
+ }
+
+out:
+ if (rom_protect)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+ release_firmware(fw);
+ return err;
+}
+
+static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
+{
+ u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
+ const struct mt76x2_fw_header *hdr;
+ int err, len, ilm_len, dlm_len;
+ const struct firmware *fw;
+
+ err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt76x2_fw_header *)fw->data;
+ ilm_len = le32_to_cpu(hdr->ilm_len);
+ dlm_len = le32_to_cpu(hdr->dlm_len);
+ len = sizeof(*hdr) + ilm_len + dlm_len;
+ if (fw->size != len) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->build_ver);
+ dev_info(dev->mt76.dev, "Build: %x\n", val);
+ dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+ /* vendor reset */
+ mt76u_mcu_fw_reset(&dev->mt76);
+ usleep_range(5000, 10000);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ /* load ILM */
+ err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT76U_MCU_ILM_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ /* load DLM */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ dlm_offset += 0x800;
+ err = mt76u_mcu_fw_send_data(&dev->mt76,
+ fw->data + sizeof(*hdr) + ilm_len,
+ dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+ dlm_offset);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_load_ivb(dev);
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
+ dev_err(dev->mt76.dev, "firmware failed to start\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ dev_dbg(dev->mt76.dev, "firmware running\n");
+
+out:
+ release_firmware(fw);
+ return err;
+}
+
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
+{
+ int err;
+
+ err = mt76x2u_mcu_load_rom_patch(dev);
+ if (err < 0)
+ return err;
+
+ return mt76x2u_mcu_load_firmware(dev);
+}
+
+int mt76x2u_mcu_init(struct mt76x2_dev *dev)
+{
+ int err;
+
+ err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
+ if (err < 0)
+ return err;
+
+ return mt76x2u_mcu_set_radio_state(dev, true);
+}
+
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+
+ usb_kill_urb(usb->mcu.res.urb);
+ mt76u_buf_free(&usb->mcu.res);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
new file mode 100644
index 000000000000..5158063d0c2e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~BIT(4);
+
+ switch (dev->chainmask & 0xf) {
+ case 2:
+ val |= BIT(3);
+ break;
+ default:
+ val &= ~BIT(3);
+ break;
+ }
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev)
+{
+ int txpath;
+
+ txpath = (dev->chainmask >> 8) & 0xf;
+ switch (txpath) {
+ case 2:
+ mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ default:
+ mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ }
+}
+
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ if (mt76x2_channel_silent(dev))
+ return;
+
+ mt76x2u_mac_stop(dev);
+
+ if (is_5ghz)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+
+ mt76x2u_mac_resume(dev);
+}
+
+static void
+mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76x2_tssi_comp t = {};
+
+ if (!dev->cal.tssi_cal_done)
+ return;
+
+ if (!dev->cal.tssi_comp_pending) {
+ /* TSSI trigger */
+ t.cal_mode = BIT(0);
+ mt76x2u_mcu_tssi_comp(dev, &t);
+ dev->cal.tssi_comp_pending = true;
+ } else {
+ if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+ return;
+
+ dev->cal.tssi_comp_pending = false;
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ t.pa_mode = 1;
+
+ t.cal_mode = BIT(1);
+ t.slope0 = txp.chain[0].tssi_slope;
+ t.offset0 = txp.chain[0].tssi_offset;
+ t.slope1 = txp.chain[1].tssi_slope;
+ t.offset1 = txp.chain[1].tssi_offset;
+ mt76x2u_mcu_tssi_comp(dev, &t);
+
+ if (t.pa_mode || dev->cal.dpd_cal_done)
+ return;
+
+ usleep_range(10000, 20000);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+ dev->cal.dpd_cal_done = true;
+ }
+}
+
+static void
+mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+ u8 channel = dev->mt76.chandef.chan->hw_value;
+ int freq, freq1;
+ u32 false_cca;
+
+ freq = dev->mt76.chandef.chan->center_freq;
+ freq1 = dev->mt76.chandef.center_freq1;
+
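+ /* map the control channel to the center channel of the
+  * configured bandwidth before passing it to the MCU
+  */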
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80: {
+ int ch_group_index;
+
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ channel += 6 - ch_group_index * 4;
+ break;
+ }
+ case NL80211_CHAN_WIDTH_40:
+ if (freq1 > freq)
+ channel += 2;
+ else
+ channel -= 2;
+ break;
+ default:
+ break;
+ }
+
+ dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+ mt76_rr(dev, MT_RX_STAT_1));
+
+ mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
+ dev->cal.avg_rssi_all, false_cca);
+}
+
+void mt76x2u_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x2_dev *dev;
+
+ dev = container_of(work, struct mt76x2_dev, cal_work.work);
+ mt76x2u_phy_tssi_compensate(dev);
+ mt76x2u_phy_update_channel_gain(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ struct ieee80211_channel *chan = chandef->chan;
+ u8 channel = chan->hw_value, bw, bw_index;
+ int ch_group_index, freq, freq1, ret;
+
+ dev->cal.channel_cal_done = false;
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = 1;
+ if (freq1 > freq) {
+ bw_index = 1;
+ ch_group_index = 0;
+ } else {
+ bw_index = 3;
+ ch_group_index = 1;
+ }
+ channel += 2 - ch_group_index * 4;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ bw = 2;
+ bw_index = ch_group_index;
+ channel += 6 - ch_group_index * 4;
+ break;
+ default:
+ bw = 0;
+ bw_index = 0;
+ ch_group_index = 0;
+ break;
+ }
+
+ mt76x2_read_rx_gain(dev);
+ mt76x2_phy_set_txpower_regs(dev, chan->band);
+ mt76x2_configure_tx_delay(dev, chan->band, bw);
+ mt76x2_phy_set_txpower(dev);
+
+ mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan);
+ if (ret)
+ return ret;
+
+ mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+ /* Enable LDPC Rx */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+ if (!dev->cal.init_cal_done) {
+ u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+ if (val != 0xff)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0);
+ }
+
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+ /* Rx LPF calibration */
+ if (!dev->cal.init_cal_done)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0);
+ dev->cal.init_cal_done = true;
+
+ mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
+ mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+ mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101b3f);
+
+ mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(8));
+
+ if (scan)
+ return 0;
+
+ if (mt76x2_tssi_enabled(dev)) {
+ /* init default values for temp compensation */
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ 0x38);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ 0x38);
+
+ /* init tssi calibration */
+ if (!mt76x2_channel_silent(dev)) {
+ struct ieee80211_channel *chan;
+ u32 flag = 0;
+
+ chan = dev->mt76.chandef.chan;
+ if (chan->band == NL80211_BAND_5GHZ)
+ flag |= BIT(0);
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ flag |= BIT(8);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ dev->cal.tssi_cal_done = true;
+ }
+ }
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index e96956710fb2..af48d43bb7dc 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -51,7 +51,7 @@ __mt76_get_txwi(struct mt76_dev *dev)
return t;
}
-static struct mt76_txwi_cache *
+struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
@@ -91,80 +91,6 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
return txq->ac;
}
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
-{
- struct mt76_queue_entry e;
- struct mt76_txwi_cache *t;
- struct mt76_queue_buf buf[32];
- struct sk_buff *iter;
- dma_addr_t addr;
- int len;
- u32 tx_info = 0;
- int n, ret;
-
- t = mt76_get_txwi(dev);
- if (!t) {
- ieee80211_free_txskb(dev->hw, skb);
- return -ENOMEM;
- }
-
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
- &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- if (ret < 0)
- goto free;
-
- len = skb->len - skb->data_len;
- addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = -ENOMEM;
- goto free;
- }
-
- n = 0;
- buf[n].addr = t->dma_addr;
- buf[n++].len = dev->drv->txwi_size;
- buf[n].addr = addr;
- buf[n++].len = len;
-
- skb_walk_frags(skb, iter) {
- if (n == ARRAY_SIZE(buf))
- goto unmap;
-
- addr = dma_map_single(dev->dev, iter->data, iter->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
- goto unmap;
-
- buf[n].addr = addr;
- buf[n++].len = iter->len;
- }
-
- if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
- goto unmap;
-
- return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
-
-unmap:
- ret = -ENOMEM;
- for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
- DMA_TO_DEVICE);
-
-free:
- e.skb = skb;
- e.txwi = t;
- dev->drv->tx_complete_skb(dev, q, &e, true);
- mt76_put_txwi(dev, t);
- return ret;
-}
-EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
-
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
@@ -185,7 +111,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
q = &dev->q_tx[qid];
spin_lock_bh(&q->lock);
- mt76_tx_queue_skb(dev, q, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8)
@@ -241,7 +167,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
info->flags |= IEEE80211_TX_STATUS_EOSP;
mt76_skb_set_moredata(skb, !last);
- mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}
void
@@ -321,7 +247,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
if (idx < 0)
return idx;
@@ -356,7 +282,8 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (cur_ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
+ txq->sta);
if (idx < 0)
return idx;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
new file mode 100644
index 000000000000..7780b07543bb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "usb_trace.h"
+#include "dma.h"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+/* should be called with usb_ctrl_mtx locked */
+static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+ int i, ret;
+
+ pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
+ : usb_sndctrlpipe(udev, 0);
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return -EIO;
+
+ ret = usb_control_msg(udev, pipe, req, req_type, val,
+ offset, buf, len, MT_VEND_REQ_TOUT_MS);
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+ usleep_range(5000, 10000);
+ }
+
+ dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+ return ret;
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_vendor_request(dev, req, req_type,
+ val, offset, buf, len);
+ trace_usb_reg_wr(dev, offset, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_vendor_request);
+
+/* should be called with usb_ctrl_mtx locked */
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u32 data = ~0;
+ u16 offset;
+ int ret;
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_EEPROM:
+ req = MT_VEND_READ_EEPROM;
+ break;
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_READ_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_READ;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ ret = __mt76u_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, offset, usb->data, sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = get_unaligned_le32(usb->data);
+ trace_usb_reg_rr(dev, addr, data);
+
+ return data;
+}
+
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_rr(dev, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+/* should be called with usb_ctrl_mtx locked */
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u16 offset;
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_WRITE_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_WRITE;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ put_unaligned_le32(val, usb->data);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR, 0,
+ offset, usb->data, sizeof(__le32));
+ trace_usb_reg_wr(dev, addr, val);
+}
+
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
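+ /* keep the read-modify-write under the usb control mutex so it is
+  * atomic with respect to concurrent register accesses
+  */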
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= __mt76u_rr(dev, addr) & ~mask;
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static void mt76u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ const u32 *val = data;
+ int i, ret;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ for (i = 0; i < (len / 4); i++) {
+ put_unaligned_le32(val[i], usb->data);
+ ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0, offset + i * 4, usb->data,
+ sizeof(__le32));
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val & 0xffff, offset, NULL, 0);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val >> 16, offset + 2, NULL, 0);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+EXPORT_SYMBOL_GPL(mt76u_single_wr);
+
+static int
+mt76u_set_endpoints(struct usb_interface *intf,
+ struct mt76_usb *usb)
+{
+ struct usb_host_interface *intf_desc = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i, in_ep = 0, out_ep = 0;
+
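+ /* walk the interface descriptor and record the bulk IN/OUT
+  * endpoints in enumeration order
+  */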
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ in_ep < __MT_EP_IN_MAX) {
+ usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
+ usb->in_max_packet = usb_endpoint_maxp(ep_desc);
+ in_ep++;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ out_ep < __MT_EP_OUT_MAX) {
+ usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
+ usb->out_max_packet = usb_endpoint_maxp(ep_desc);
+ out_ep++;
+ }
+ }
+
+ if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen)
+{
+ struct urb *urb = buf->urb;
+ int i;
+
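+ /* back each scatterlist entry with a freshly allocated page fragment */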
+ for (i = 0; i < nsgs; i++) {
+ struct page *page;
+ void *data;
+ int offset;
+
+ data = netdev_alloc_frag(len);
+ if (!data)
+ break;
+
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+ sg_set_page(&urb->sg[i], page, sglen, offset);
+ }
+
+ if (i < nsgs) {
+ int j;
+
+ for (j = nsgs; j < urb->num_sgs; j++)
+ skb_free_frag(sg_virt(&urb->sg[j]));
+ urb->num_sgs = i;
+ }
+
+ urb->num_sgs = max_t(int, i, urb->num_sgs);
+ buf->len = urb->num_sgs * sglen;
+ sg_init_marker(urb->sg, urb->num_sgs);
+
+ return i ? : -ENOMEM;
+}
+
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen, gfp_t gfp)
+{
+ buf->urb = usb_alloc_urb(0, gfp);
+ if (!buf->urb)
+ return -ENOMEM;
+
+ buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+ gfp);
+ if (!buf->urb->sg)
+ return -ENOMEM;
+
+ sg_init_table(buf->urb->sg, nsgs);
+ buf->dev = dev;
+
+ return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
+
+void mt76u_buf_free(struct mt76u_buf *buf)
+{
+ struct urb *urb = buf->urb;
+ int i;
+
+ for (i = 0; i < urb->num_sgs; i++)
+ skb_free_frag(sg_virt(&urb->sg[i]));
+ usb_free_urb(buf->urb);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_free);
+
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
+ else
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
+
+ usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
+ complete_fn, context);
+
+ return usb_submit_urb(buf->urb, gfp);
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_buf);
+
+static inline struct mt76u_buf
+*mt76u_get_next_rx_entry(struct mt76_queue *q)
+{
+ struct mt76u_buf *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (q->queued > 0) {
+ buf = &q->entry[q->head].ubuf;
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return buf;
+}
+
+static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+{
+ u16 dma_len, min_len;
+
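+ /* the DMA header starts with a 16-bit length field; validate it
+  * against the length actually received on the bulk endpoint
+  */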
+ dma_len = get_unaligned_le16(data);
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
+ MT_FCE_INFO_LEN;
+
+ if (data_len < min_len || WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return -EINVAL;
+ return dma_len;
+}
+
+static int
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ u8 *data = sg_virt(&urb->sg[0]);
+ int data_len, len, nsgs = 1;
+ struct sk_buff *skb;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+ return 0;
+
+ len = mt76u_get_rx_entry_len(data, urb->actual_length);
+ if (len < 0)
+ return 0;
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb)
+ return 0;
+
+ data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ if (skb->tail + data_len > skb->end) {
+ dev_kfree_skb(skb);
+ return 1;
+ }
+
+ __skb_put(skb, data_len);
+ len -= data_len;
+
+ while (len > 0) {
+ data_len = min_t(int, len, urb->sg[nsgs].length);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ sg_page(&urb->sg[nsgs]),
+ urb->sg[nsgs].offset,
+ data_len, q->buf_size);
+ len -= data_len;
+ nsgs++;
+ }
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+
+ return nsgs;
+}
+
+static void mt76u_complete_rx(struct urb *urb)
+{
+ struct mt76_dev *dev = urb->context;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
+ goto out;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued++;
+ tasklet_schedule(&dev->usb.rx_tasklet);
+out:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static void mt76u_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int err, nsgs, buf_len = q->buf_size;
+ struct mt76u_buf *buf;
+
+ rcu_read_lock();
+
+ while (true) {
+ buf = mt76u_get_next_rx_entry(q);
+ if (!buf)
+ break;
+
+ nsgs = mt76u_process_rx_entry(dev, buf->urb);
+ if (nsgs > 0) {
+ err = mt76u_fill_rx_sg(dev, buf, nsgs,
+ buf_len,
+ SKB_WITH_OVERHEAD(buf_len));
+ if (err < 0)
+ break;
+ }
+ mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ buf, GFP_ATOMIC,
+ mt76u_complete_rx, dev);
+ }
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+
+ rcu_read_unlock();
+}
+
+int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+ int i, err = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+ for (i = 0; i < q->ndesc; i++) {
+ err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ &q->entry[i].ubuf, GFP_ATOMIC,
+ mt76u_complete_rx, dev);
+ if (err < 0)
+ break;
+ }
+ q->head = q->tail = 0;
+ q->queued = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
+
+static int mt76u_alloc_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i, err, nsgs;
+
+ spin_lock_init(&q->lock);
+ q->entry = devm_kzalloc(dev->dev,
+ MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ if (mt76u_check_sg(dev)) {
+ q->buf_size = MT_RX_BUF_SIZE;
+ nsgs = MT_SG_MAX_SIZE;
+ } else {
+ q->buf_size = PAGE_SIZE;
+ nsgs = 1;
+ }
+
+ for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
+ err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
+ nsgs, q->buf_size,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+ }
+ q->ndesc = MT_NUM_RX_ENTRIES;
+
+ return mt76u_submit_rx_buffers(dev);
+}
+
+static void mt76u_free_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ mt76u_buf_free(&q->entry[i].ubuf);
+}
+
+static void mt76u_stop_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ usb_kill_urb(q->entry[i].ubuf.urb);
+}
+
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+{
+ struct sk_buff *iter, *last = skb;
+ u32 info, pad;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+ info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ skb_walk_frags(skb, iter) {
+ last = iter;
+ if (!iter->next) {
+ skb->data_len += pad;
+ skb->len += pad;
+ break;
+ }
+ }
+
+ if (unlikely(pad)) {
+ if (__skb_pad(last, pad, true))
+ return -ENOMEM;
+ __skb_put(last, pad);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
+
+static void mt76u_tx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76u_buf *buf;
+ struct mt76_queue *q;
+ bool wake;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+
+ spin_lock_bh(&q->lock);
+ while (true) {
+ buf = &q->entry[q->head].ubuf;
+ if (!buf->done || !q->queued)
+ break;
+
+ dev->drv->tx_complete_skb(dev, q,
+ &q->entry[q->head],
+ false);
+
+ if (q->entry[q->head].schedule) {
+ q->entry[q->head].schedule = false;
+ q->swq_queued--;
+ }
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+ }
+ mt76_txq_schedule(dev, q);
+ wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+ if (!q->queued)
+ wake_up(&dev->tx_wait);
+
+ spin_unlock_bh(&q->lock);
+
+ if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
+ ieee80211_queue_delayed_work(dev->hw,
+ &dev->usb.stat_work,
+ msecs_to_jiffies(10));
+
+ if (wake)
+ ieee80211_wake_queue(dev->hw, i);
+ }
+}
+
+static void mt76u_tx_status_data(struct work_struct *work)
+{
+ struct mt76_usb *usb;
+ struct mt76_dev *dev;
+ u8 update = 1;
+ u16 count = 0;
+
+ usb = container_of(work, struct mt76_usb, stat_work.work);
+ dev = container_of(usb, struct mt76_dev, usb);
+
+ while (true) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ break;
+
+ if (!dev->drv->tx_status_data(dev, &update))
+ break;
+ count++;
+ }
+
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
+ ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
+ msecs_to_jiffies(10));
+ else
+ clear_bit(MT76_READING_STATS, &dev->state);
+}
+
+static void mt76u_complete_tx(struct urb *urb)
+{
+ struct mt76u_buf *buf = urb->context;
+ struct mt76_dev *dev = buf->dev;
+
+ if (mt76u_urb_error(urb))
+ dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
+ buf->done = true;
+
+ tasklet_schedule(&dev->usb.tx_tasklet);
+}
+
+static int
+mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
+{
+ int nsgs = 1 + skb_shinfo(skb)->nr_frags;
+ struct sk_buff *iter;
+
+ skb_walk_frags(skb, iter)
+ nsgs += 1 + skb_shinfo(iter)->nr_frags;
+
+ memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
+
+ nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
+ sg_init_marker(urb->sg, nsgs);
+ urb->num_sgs = nsgs;
+
+ return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
+}
+
+static int
+mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ u8 ep = q2ep(q->hw_idx);
+ struct mt76u_buf *buf;
+ u16 idx = q->tail;
+ unsigned int pipe;
+ int err;
+
+ if (q->queued == q->ndesc)
+ return -ENOSPC;
+
+ err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
+ if (err < 0)
+ return err;
+
+ buf = &q->entry[idx].ubuf;
+ buf->done = false;
+
+ err = mt76u_tx_build_sg(skb, buf->urb);
+ if (err < 0)
+ return err;
+
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
+ usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
+ mt76u_complete_tx, buf);
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->entry[idx].skb = skb;
+ q->queued++;
+
+ return idx;
+}
+
+static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct mt76u_buf *buf;
+ int err;
+
+ while (q->first != q->tail) {
+ buf = &q->entry[q->first].ubuf;
+ err = usb_submit_urb(buf->urb, GFP_ATOMIC);
+ if (err < 0) {
+ if (err == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ else
+ dev_err(dev->dev, "tx urb submit failed:%d\n",
+ err);
+ break;
+ }
+ q->first = (q->first + 1) % q->ndesc;
+ }
+}
+
+static int mt76u_alloc_tx(struct mt76_dev *dev)
+{
+ struct mt76u_buf *buf;
+ struct mt76_queue *q;
+ size_t size;
+ int i, j;
+
+ size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->swq);
+ q->hw_idx = q2hwq(i);
+
+ q->entry = devm_kzalloc(dev->dev,
+ MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->ndesc = MT_NUM_TX_ENTRIES;
+ for (j = 0; j < q->ndesc; j++) {
+ buf = &q->entry[j].ubuf;
+ buf->dev = dev;
+
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!buf->urb)
+ return -ENOMEM;
+
+ buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!buf->urb->sg)
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void mt76u_free_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_free_urb(q->entry[j].ubuf.urb);
+ }
+}
+
+static void mt76u_stop_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_kill_urb(q->entry[j].ubuf.urb);
+ }
+}
+
+void mt76u_stop_queues(struct mt76_dev *dev)
+{
+ tasklet_disable(&dev->usb.rx_tasklet);
+ tasklet_disable(&dev->usb.tx_tasklet);
+
+ mt76u_stop_rx(dev);
+ mt76u_stop_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_queues);
+
+void mt76u_stop_stat_wk(struct mt76_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->usb.stat_work);
+ clear_bit(MT76_READING_STATS, &dev->state);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
+
+void mt76u_queues_deinit(struct mt76_dev *dev)
+{
+ mt76u_stop_queues(dev);
+
+ mt76u_free_rx(dev);
+ mt76u_free_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
+
+int mt76u_alloc_queues(struct mt76_dev *dev)
+{
+ int err;
+
+ err = mt76u_alloc_rx(dev);
+ if (err < 0)
+ goto err;
+
+ err = mt76u_alloc_tx(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+err:
+ mt76u_queues_deinit(dev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
+
+static const struct mt76_queue_ops usb_queue_ops = {
+ .tx_queue_skb = mt76u_tx_queue_skb,
+ .kick = mt76u_tx_kick,
+};
+
+int mt76u_init(struct mt76_dev *dev,
+ struct usb_interface *intf)
+{
+ static const struct mt76_bus_ops mt76u_ops = {
+ .rr = mt76u_rr,
+ .wr = mt76u_wr,
+ .rmw = mt76u_rmw,
+ .copy = mt76u_copy,
+ };
+ struct mt76_usb *usb = &dev->usb;
+
+ tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
+ tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
+ INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
+ skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
+
+ init_completion(&usb->mcu.cmpl);
+ mutex_init(&usb->mcu.mutex);
+
+ mutex_init(&usb->usb_ctrl_mtx);
+ dev->bus = &mt76u_ops;
+ dev->queue_ops = &usb_queue_ops;
+
+ return mt76u_set_endpoints(intf, usb);
+}
+EXPORT_SYMBOL_GPL(mt76u_init);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
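
The TX path above keeps a software ring per AC queue: mt76u_tx_queue_skb() fills the slot at q->tail and advances it, while mt76u_tx_kick() submits URBs for every slot between q->first and q->tail. A minimal stand-alone sketch of just that index bookkeeping (hypothetical NDESC value, no USB involved) is:

#include <stdio.h>

#define NDESC 8   /* hypothetical ring size, stands in for MT_NUM_TX_ENTRIES */

struct ring {
	int first;   /* next slot to hand to the USB core */
	int tail;    /* next free slot for a new frame */
	int queued;  /* frames not yet completed */
};

/* enqueue a frame and return the slot used, mirroring mt76u_tx_queue_skb() */
static int ring_enqueue(struct ring *q)
{
	int idx = q->tail;

	q->tail = (q->tail + 1) % NDESC;
	q->queued++;
	return idx;
}

/* submit everything between first and tail, mirroring mt76u_tx_kick() */
static void ring_kick(struct ring *q)
{
	while (q->first != q->tail) {
		printf("submit urb for slot %d\n", q->first);
		q->first = (q->first + 1) % NDESC;
	}
}

int main(void)
{
	struct ring q = { 0, 0, 0 };

	for (int i = 0; i < 3; i++)
		ring_enqueue(&q);
	ring_kick(&q);
	printf("queued=%d first=%d tail=%d\n", q.queued, q.first, q.tail);
	return 0;
}
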
diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
new file mode 100644
index 000000000000..070be803d463
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76.h"
+#include "dma.h"
+
+#define MT_CMD_HDR_LEN 4
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MT_CMD_HDR_LEN);
+ skb_put_data(skb, data, len);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc);
+
+void mt76u_mcu_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
+
+static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
+{
+ struct mt76_usb *usb = &dev->usb;
+ struct mt76u_buf *buf = &usb->mcu.res;
+ int i, ret;
+ u32 rxfce;
+
+ for (i = 0; i < 5; i++) {
+ if (!wait_for_completion_timeout(&usb->mcu.cmpl,
+ msecs_to_jiffies(300)))
+ continue;
+
+ if (buf->urb->status)
+ return -EIO;
+
+ rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0]));
+ ret = mt76u_submit_buf(dev, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (ret)
+ return ret;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce))
+ return 0;
+
+ dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
+ FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->dev, "error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76_usb *usb = &dev->usb;
+ unsigned int pipe;
+ int ret, sent;
+ u8 seq = 0;
+ u32 info;
+
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return 0;
+
+ mutex_lock(&usb->mcu.mutex);
+
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
+ if (wait_resp) {
+ seq = ++usb->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++usb->mcu.msg_seq & 0xf;
+ }
+
+ info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ MT_MCU_MSG_TYPE_CMD;
+ ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info);
+ if (ret)
+ goto out;
+
+ ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt76u_mcu_wait_resp(dev, seq);
+
+out:
+ mutex_unlock(&usb->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg);
+
+void mt76u_mcu_fw_reset(struct mt76_dev *dev)
+{
+ mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x1, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset);
+
+static int
+__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
+ const void *fw_data, int len, u32 dst_addr)
+{
+ u8 *data = sg_virt(&buf->urb->sg[0]);
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ __le32 info;
+ u32 val;
+ int err;
+
+ info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, len) |
+ MT_MCU_MSG_TYPE_CMD);
+
+ memcpy(data, &info, sizeof(info));
+ memcpy(data + sizeof(info), fw_data, len);
+ memset(data + sizeof(info) + len, 0, 4);
+
+ mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ len = roundup(len, 4);
+ mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+
+ buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
+ err = mt76u_submit_buf(dev, USB_DIR_OUT,
+ MT_EP_OUT_INBAND_CMD,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb, &cmpl);
+ if (err < 0)
+ return err;
+
+ if (!wait_for_completion_timeout(&cmpl,
+ msecs_to_jiffies(1000))) {
+ dev_err(dev->dev, "firmware upload timed out\n");
+ usb_kill_urb(buf->urb);
+ return -ETIMEDOUT;
+ }
+
+ if (mt76u_urb_error(buf->urb)) {
+ dev_err(dev->dev, "firmware upload failed: %d\n",
+ buf->urb->status);
+ return buf->urb->status;
+ }
+
+ val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ return 0;
+}
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset)
+{
+ int err, len, pos = 0, max_len = max_payload - 8;
+ struct mt76u_buf buf;
+
+ err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ while (data_len > 0) {
+ len = min_t(int, data_len, max_len);
+ err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos,
+ len, offset + pos);
+ if (err < 0)
+ break;
+
+ data_len -= len;
+ pos += len;
+ usleep_range(5000, 10000);
+ }
+ mt76u_buf_free(&buf);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data);
+
+int mt76u_mcu_init_rx(struct mt76_dev *dev)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int err;
+
+ err = mt76u_buf_alloc(dev, &usb->mcu.res, 1,
+ MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &usb->mcu.res, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (err < 0)
+ mt76u_buf_free(&usb->mcu.res);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
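
mt76u_mcu_fw_send_data() above pushes the firmware image in chunks: each URB carries at most max_payload - 8 bytes of payload, the length written to MT_FCE_DMA_LEN is rounded up to 4 bytes, and the destination address advances by the chunk size. A stand-alone sketch of that split, with hypothetical image size, payload limit and load address:

#include <stdio.h>

/* round len up to a multiple of 4, as the FCE DMA length register expects */
static int roundup4(int len)
{
	return (len + 3) & ~3;
}

int main(void)
{
	int data_len = 100000;      /* hypothetical firmware image size */
	int max_payload = 0x3800;   /* hypothetical URB payload limit */
	int offset = 0x90000;       /* hypothetical load address */
	int max_len = max_payload - 8;
	int pos = 0;

	while (data_len > 0) {
		int len = data_len < max_len ? data_len : max_len;

		printf("chunk @0x%05x len=%d (dma len=%d)\n",
		       offset + pos, len, roundup4(len));
		data_len -= len;
		pos += len;
	}
	return 0;
}
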
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.c b/drivers/net/wireless/mediatek/mt76/usb_trace.c
new file mode 100644
index 000000000000..7e1f540f0b7a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "usb_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h
new file mode 100644
index 000000000000..52db7012304a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_USB_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76_usb
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT " %04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_rr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE usb_trace
+
+#include <trace/define_trace.h>
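
usb_trace.h declares one event class shared by two events, so both usb_reg_rr and usb_reg_wr record the wiphy name plus a reg/value pair. Presumably the USB register accessors emit them as in the hedged sketch below; mt76u_do_read() is a hypothetical stand-in for the driver's real low-level read helper, only the generated trace_usb_reg_rr() call is taken from the header above.

#include "mt76.h"
#include "usb_trace.h"

static u32 example_usb_rr(struct mt76_dev *dev, u32 addr)
{
	u32 val = mt76u_do_read(dev, addr);	/* hypothetical helper */

	/* emit the DEFINE_EVENT-generated tracepoint declared above */
	trace_usb_reg_rr(dev, addr, val);
	return val;
}
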
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index d3b611aaf061..faea99b7a445 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -603,6 +603,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev)
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 7b21016012c3..0f1789020960 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -308,6 +308,17 @@ mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int ret;
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (cmd == SET_KEY) {
key->hw_key_idx = wcid->idx;
wcid->hw_key_idx = idx;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index ae0ca8006849..4aa332f4646b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -843,6 +843,88 @@ static int qtnf_set_mac_acl(struct wiphy *wiphy,
return ret;
}
+static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
+ QLINK_PM_OFF, timeout);
+ if (ret) {
+ pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret = 0;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (!wowlan) {
+ pr_debug("WoWLAN triggers are not enabled\n");
+ qtnf_virtual_intf_cleanup(vif->netdev);
+ goto exit;
+ }
+
+ qtnf_scan_done(vif->mac, true);
+
+ ret = qtnf_cmd_send_wowlan_set(vif, wowlan);
+ if (ret) {
+ pr_err("MAC%u: failed to set WoWLAN triggers\n",
+ mac->macid);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int qtnf_resume(struct wiphy *wiphy)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret = 0;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = qtnf_cmd_send_wowlan_set(vif, NULL);
+ if (ret) {
+ pr_err("MAC%u: failed to reset WoWLAN triggers\n",
+ mac->macid);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static void qtnf_set_wakeup(struct wiphy *wiphy, bool enabled)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_bus *bus = mac->bus;
+
+ device_set_wakeup_enable(bus->dev, enabled);
+}
+#endif
+
static struct cfg80211_ops qtn_cfg80211_ops = {
.add_virtual_intf = qtnf_add_virtual_intf,
.change_virtual_intf = qtnf_change_virtual_intf,
@@ -869,6 +951,12 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.channel_switch = qtnf_channel_switch,
.start_radar_detection = qtnf_start_radar_detection,
.set_mac_acl = qtnf_set_mac_acl,
+ .set_power_mgmt = qtnf_set_power_mgmt,
+#ifdef CONFIG_PM
+ .suspend = qtnf_suspend,
+ .resume = qtnf_resume,
+ .set_wakeup = qtnf_set_wakeup,
+#endif
};
static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -921,6 +1009,9 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
qtn_cfg80211_ops.start_radar_detection = NULL;
+ if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
+ qtn_cfg80211_ops.set_power_mgmt = NULL;
+
wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
if (!wiphy)
return NULL;
@@ -975,7 +1066,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->retry_long = macinfo->lretry_limit;
wiphy->coverage_class = macinfo->coverage_class;
- wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH;
+ wiphy->max_scan_ssids =
+ (hw_info->max_scan_ssids) ? hw_info->max_scan_ssids : 1;
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
@@ -994,6 +1086,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
@@ -1013,6 +1106,14 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (hw_info->hw_capab & QLINK_HW_CAPAB_STA_INACT_TIMEOUT)
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
+ if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
+ wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+
+#ifdef CONFIG_PM
+ if (macinfo->wowlan)
+ wiphy->wowlan = macinfo->wowlan;
+#endif
+
if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
REGULATORY_CUSTOM_REG;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index c5d94a95e21a..ae9e77300533 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -640,83 +640,83 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo,
return;
if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
sinfo->inactive_time = le32_to_cpu(stats->inactive_time);
}
if (qtnf_sta_stat_avail(connected_time,
QLINK_STA_INFO_CONNECTED_TIME)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME);
sinfo->connected_time = le32_to_cpu(stats->connected_time);
}
if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = stats->signal - QLINK_RSSI_OFFSET;
}
if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET;
}
if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate);
}
if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate);
}
if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS);
qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags);
}
if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
}
if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES);
sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
}
if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
}
if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
}
if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->rx_packets = le32_to_cpu(stats->rx_packets);
}
if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = le32_to_cpu(stats->tx_packets);
}
if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon);
}
if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc);
}
if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->tx_failed = le32_to_cpu(stats->tx_failed);
}
@@ -1092,6 +1092,9 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
case QTN_TLV_ID_UBOOT_VER:
uboot_ver = (const void *)tlv->val;
break;
+ case QTN_TLV_ID_MAX_SCAN_SSIDS:
+ hwinfo->max_scan_ssids = *tlv->val;
+ break;
default:
break;
}
@@ -1135,6 +1138,37 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
return 0;
}
+static void
+qtnf_parse_wowlan_info(struct qtnf_wmac *mac,
+ const struct qlink_wowlan_capab_data *wowlan)
+{
+ struct qtnf_mac_info *mac_info = &mac->macinfo;
+ const struct qlink_wowlan_support *data1;
+ struct wiphy_wowlan_support *supp;
+
+ supp = kzalloc(sizeof(*supp), GFP_KERNEL);
+ if (!supp)
+ return;
+
+ switch (le16_to_cpu(wowlan->version)) {
+ case 0x1:
+ data1 = (struct qlink_wowlan_support *)wowlan->data;
+
+ supp->flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT;
+ supp->n_patterns = le32_to_cpu(data1->n_patterns);
+ supp->pattern_max_len = le32_to_cpu(data1->pattern_max_len);
+ supp->pattern_min_len = le32_to_cpu(data1->pattern_min_len);
+
+ mac_info->wowlan = supp;
+ break;
+ default:
+ pr_warn("MAC%u: unsupported WoWLAN version 0x%x\n",
+ mac->macid, le16_to_cpu(wowlan->version));
+ kfree(supp);
+ break;
+ }
+}
+
static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const u8 *tlv_buf, size_t tlv_buf_size)
{
@@ -1144,6 +1178,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const struct qlink_iface_comb_num *comb_num;
const struct qlink_iface_limit_record *rec;
const struct qlink_iface_limit *lim;
+ const struct qlink_wowlan_capab_data *wowlan;
u16 rec_len;
u16 tlv_type;
u16 tlv_value_len;
@@ -1252,7 +1287,31 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
ext_capa_mask = (u8 *)tlv->val;
ext_capa_mask_len = tlv_value_len;
break;
+ case QTN_TLV_ID_WOWLAN_CAPAB:
+ if (tlv_value_len < sizeof(*wowlan))
+ return -EINVAL;
+
+ wowlan = (void *)tlv->val;
+ if (!le16_to_cpu(wowlan->len)) {
+ pr_warn("MAC%u: skip empty WoWLAN data\n",
+ mac->macid);
+ break;
+ }
+
+ rec_len = sizeof(*wowlan) + le16_to_cpu(wowlan->len);
+ if (unlikely(tlv_value_len != rec_len)) {
+ pr_warn("MAC%u: WoWLAN data size mismatch\n",
+ mac->macid);
+ return -EINVAL;
+ }
+
+ kfree(mac->macinfo.wowlan);
+ mac->macinfo.wowlan = NULL;
+ qtnf_parse_wowlan_info(mac, wowlan);
+ break;
default:
+ pr_warn("MAC%u: unknown TLV type %u\n",
+ mac->macid, tlv_type);
break;
}
@@ -2234,6 +2293,22 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
qchan->chan.flags = cpu_to_le32(flags);
}
+static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
+ const u8 *mac_addr,
+ const u8 *mac_addr_mask)
+{
+ struct qlink_random_mac_addr *randmac;
+ struct qlink_tlv_hdr *hdr =
+ skb_put(cmd_skb, sizeof(*hdr) + sizeof(*randmac));
+
+ hdr->type = cpu_to_le16(QTN_TLV_ID_RANDOM_MAC_ADDR);
+ hdr->len = cpu_to_le16(sizeof(*randmac));
+ randmac = (struct qlink_random_mac_addr *)hdr->val;
+
+ memcpy(randmac->mac_addr, mac_addr, ETH_ALEN);
+ memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN);
+}
+
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb;
@@ -2244,11 +2319,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
int count = 0;
int ret;
- if (scan_req->n_ssids > QTNF_MAX_SSID_LIST_LENGTH) {
- pr_err("MAC%u: too many SSIDs in scan request\n", mac->macid);
- return -EINVAL;
- }
-
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_SCAN,
sizeof(struct qlink_cmd));
@@ -2291,6 +2361,15 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
}
}
+ if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n",
+ mac->macid,
+ scan_req->mac_addr, scan_req->mac_addr_mask);
+
+ qtnf_cmd_randmac_tlv_add(cmd_skb, scan_req->mac_addr,
+ scan_req->mac_addr_mask);
+ }
+
ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
if (unlikely(ret))
@@ -2774,3 +2853,93 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
return ret;
}
+
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ struct qlink_cmd_pm_set *cmd;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_PM_SET, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_pm_set *)cmd_skb->data;
+ cmd->pm_mode = pm_mode;
+ cmd->pm_standby_timer = cpu_to_le32(timeout);
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ }
+
+out:
+ qtnf_bus_unlock(bus);
+ return ret;
+}
+
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+ const struct cfg80211_wowlan *wowl)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ struct qlink_cmd_wowlan_set *cmd;
+ u32 triggers = 0;
+ int count = 0;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_WOWLAN_SET, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ qtnf_bus_lock(bus);
+
+ cmd = (struct qlink_cmd_wowlan_set *)cmd_skb->data;
+
+ if (wowl) {
+ if (wowl->disconnect)
+ triggers |= QLINK_WOWLAN_TRIG_DISCONNECT;
+
+ if (wowl->magic_pkt)
+ triggers |= QLINK_WOWLAN_TRIG_MAGIC_PKT;
+
+ if (wowl->n_patterns && wowl->patterns) {
+ triggers |= QLINK_WOWLAN_TRIG_PATTERN_PKT;
+ while (count < wowl->n_patterns) {
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb,
+ QTN_TLV_ID_WOWLAN_PATTERN,
+ wowl->patterns[count].pattern,
+ wowl->patterns[count].pattern_len);
+ count++;
+ }
+ }
+ }
+
+ cmd->triggers = cpu_to_le32(triggers);
+
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ }
+
+out:
+ qtnf_bus_unlock(bus);
+ return ret;
+}
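
qtnf_cmd_send_wowlan_set() above translates the cfg80211 WoWLAN settings into a qlink trigger bitmask plus one pattern TLV per wakeup pattern. A minimal sketch of the bitmask translation, with the trigger bit values mirrored from enum qlink_wowlan_trigger in qlink.h:

#include <stdio.h>
#include <stdbool.h>

/* trigger bits mirrored from enum qlink_wowlan_trigger */
#define TRIG_DISCONNECT  (1u << 0)
#define TRIG_MAGIC_PKT   (1u << 1)
#define TRIG_PATTERN_PKT (1u << 2)

static unsigned int wowlan_triggers(bool disconnect, bool magic_pkt,
				    int n_patterns)
{
	unsigned int triggers = 0;

	if (disconnect)
		triggers |= TRIG_DISCONNECT;
	if (magic_pkt)
		triggers |= TRIG_MAGIC_PKT;
	if (n_patterns)
		triggers |= TRIG_PATTERN_PKT;
	return triggers;
}

int main(void)
{
	printf("triggers=0x%x\n", wowlan_triggers(true, true, 2));
	return 0;
}
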
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index cf9274add26d..1ac41156c192 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -76,5 +76,8 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
u32 cac_time_ms);
int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
const struct cfg80211_acl_data *params);
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout);
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+ const struct cfg80211_wowlan *wowl);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index a6a450984f9a..19abbc4e23e0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -179,6 +179,30 @@ static void qtnf_netdev_tx_timeout(struct net_device *ndev)
}
}
+static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct sockaddr *sa = addr;
+ int ret;
+ unsigned char old_addr[ETH_ALEN];
+
+ memcpy(old_addr, sa->sa_data, sizeof(old_addr));
+
+ ret = eth_mac_addr(ndev, sa);
+ if (ret)
+ return ret;
+
+ qtnf_scan_done(vif->mac, true);
+
+ ret = qtnf_cmd_send_change_intf_type(vif, vif->wdev.iftype,
+ sa->sa_data);
+
+ if (ret)
+ memcpy(ndev->dev_addr, old_addr, ETH_ALEN);
+
+ return ret;
+}
+
/* Network device ops handlers */
const struct net_device_ops qtnf_netdev_ops = {
.ndo_open = qtnf_netdev_open,
@@ -186,6 +210,7 @@ const struct net_device_ops qtnf_netdev_ops = {
.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
.ndo_get_stats64 = qtnf_netdev_get_stats64,
+ .ndo_set_mac_address = qtnf_netdev_set_mac_address,
};
static int qtnf_mac_init_single_band(struct wiphy *wiphy,
@@ -470,6 +495,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
qtnf_mac_iface_comb_free(mac);
kfree(mac->macinfo.extended_capabilities);
kfree(mac->macinfo.extended_capabilities_mask);
+ kfree(mac->macinfo.wowlan);
wiphy_free(wiphy);
bus->mac[macid] = NULL;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 214435448335..a1e338a1f055 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -40,7 +40,6 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
-#define QTNF_MAX_SSID_LIST_LENGTH 2
#define QTNF_MAX_VSIE_LEN 255
#define QTNF_MAX_INTF 8
#define QTNF_MAX_EVENT_QUEUE_LEN 255
@@ -111,6 +110,7 @@ struct qtnf_mac_info {
u8 *extended_capabilities;
u8 *extended_capabilities_mask;
u8 extended_capabilities_len;
+ struct wiphy_wowlan_support *wowlan;
};
struct qtnf_chan_stats {
@@ -145,6 +145,7 @@ struct qtnf_hw_info {
u8 total_rx_chain;
char fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
+ u8 max_scan_ssids;
};
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index f85deda703fb..99d37e3efba6 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -69,11 +69,15 @@ struct qlink_msg_header {
* associated STAs due to inactivity. Inactivity timeout period is taken
* from QLINK_CMD_START_AP parameters.
* @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality
+ * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address
+ * Randomization in probe requests.
*/
enum qlink_hw_capab {
QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
+ QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
+ QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
};
enum qlink_iface_type {
@@ -253,6 +257,8 @@ enum qlink_cmd_type {
QLINK_CMD_CHAN_STATS = 0x0054,
QLINK_CMD_CONNECT = 0x0060,
QLINK_CMD_DISCONNECT = 0x0061,
+ QLINK_CMD_PM_SET = 0x0062,
+ QLINK_CMD_WOWLAN_SET = 0x0063,
};
/**
@@ -665,6 +671,54 @@ struct qlink_acl_data {
struct qlink_mac_address mac_addrs[0];
} __packed;
+/**
+ * enum qlink_pm_mode - Power Management mode
+ *
+ * @QLINK_PM_OFF: normal mode, no power saving enabled
+ * @QLINK_PM_AUTO_STANDBY: enable auto power save mode
+ */
+enum qlink_pm_mode {
+ QLINK_PM_OFF = 0,
+ QLINK_PM_AUTO_STANDBY = 1,
+};
+
+/**
+ * struct qlink_cmd_pm_set - data for QLINK_CMD_PM_SET command
+ *
+ * @pm_standby_timer: period of network inactivity in seconds before
+ * putting the radio into power save mode
+ * @pm_mode: power management mode
+ */
+struct qlink_cmd_pm_set {
+ struct qlink_cmd chdr;
+ __le32 pm_standby_timer;
+ u8 pm_mode;
+} __packed;
+
+/**
+ * enum qlink_wowlan_trigger - WoWLAN wakeup triggers
+ *
+ * @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect
+ * @QLINK_WOWLAN_TRIG_MAGIC_PKT: wakeup on magic packet
+ * @QLINK_WOWLAN_TRIG_PATTERN_PKT: wakeup on user-defined packet
+ */
+enum qlink_wowlan_trigger {
+ QLINK_WOWLAN_TRIG_DISCONNECT = BIT(0),
+ QLINK_WOWLAN_TRIG_MAGIC_PKT = BIT(1),
+ QLINK_WOWLAN_TRIG_PATTERN_PKT = BIT(2),
+};
+
+/**
+ * struct qlink_cmd_wowlan_set - data for QLINK_CMD_WOWLAN_SET command
+ *
+ * @triggers: requested bitmask of WoWLAN triggers
+ */
+struct qlink_cmd_wowlan_set {
+ struct qlink_cmd chdr;
+ __le32 triggers;
+ u8 data[0];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -1062,6 +1116,8 @@ struct qlink_event_radar {
* @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
* &struct qlink_sta_stats. Valid values are marked as such in a bitmap
* carried by QTN_TLV_ID_STA_STATS_MAP.
+ * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
+ * for in any given scan.
*/
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1089,6 +1145,10 @@ enum qlink_tlv_id {
QTN_TLV_ID_HW_ID = 0x0405,
QTN_TLV_ID_CALIBRATION_VER = 0x0406,
QTN_TLV_ID_UBOOT_VER = 0x0407,
+ QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408,
+ QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
+ QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
+ QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
};
struct qlink_tlv_hdr {
@@ -1360,4 +1420,49 @@ struct qlink_sta_stats {
u8 rsvd[1];
};
+/**
+ * struct qlink_random_mac_addr - data for QTN_TLV_ID_RANDOM_MAC_ADDR TLV
+ *
+ * Specifies the MAC address mask/value used to generate a random MAC address
+ * during scan.
+ *
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
+ */
+struct qlink_random_mac_addr {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+} __packed;
+
+/**
+ * struct qlink_wowlan_capab_data - data for QTN_TLV_ID_WOWLAN_CAPAB TLV
+ *
+ * WoWLAN capabilities supported by cards.
+ *
+ * @version: version of WoWLAN data structure, to ensure backward
+ * compatibility with firmware versions that have limited WoWLAN support
+ * @len: Total length of WoWLAN data
+ * @data: supported WoWLAN features
+ */
+struct qlink_wowlan_capab_data {
+ __le16 version;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+/**
+ * struct qlink_wowlan_support - supported WoWLAN capabilities
+ *
+ * @n_patterns: number of supported wakeup patterns
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ */
+struct qlink_wowlan_support {
+ __le32 n_patterns;
+ __le32 pattern_max_len;
+ __le32 pattern_min_len;
+} __packed;
+
#endif /* _QTN_QLINK_H_ */
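
The qlink_random_mac_addr TLV documented above follows the usual cfg80211 convention: mask bits set to 1 are taken from mac_addr, mask bits set to 0 are randomized for each scan. A small stand-alone sketch of how a receiver could apply that rule (stdlib rand() is used purely for illustration, not as a suggestion for the firmware's RNG):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

/* keep mask bits from base, randomize the rest */
static void apply_random_mac(unsigned char *addr,
			     const unsigned char *base,
			     const unsigned char *mask)
{
	for (int i = 0; i < ETH_ALEN; i++)
		addr[i] = (base[i] & mask[i]) | (rand() & ~mask[i]);
}

int main(void)
{
	unsigned char base[ETH_ALEN] = { 0x02, 0x12, 0x34, 0x00, 0x00, 0x00 };
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
	unsigned char addr[ETH_ALEN];

	srand((unsigned int)time(NULL));
	apply_random_mac(addr, base, mask);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}
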
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index c380c1f56ba6..fa2fd64084ac 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -527,24 +527,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
-int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_add);
-
-int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
-
void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index a7e0a17aa7e8..08c607c031bc 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -470,7 +470,6 @@ static inline struct rcs __iomem *rcs_base(ray_dev_t *dev)
static int ray_init(struct net_device *dev)
{
int i;
- UCHAR *p;
struct ccs __iomem *pccs;
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link = local->finder;
@@ -513,12 +512,9 @@ static int ray_init(struct net_device *dev)
init_startup_params(local);
/* copy mac address to startup parameters */
- if (parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
- p = local->sparm.b4.a_mac_addr;
- } else {
+ if (!parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
memcpy(&local->sparm.b4.a_mac_addr,
&local->startup_res.station_addr, ADDRLEN);
- p = local->sparm.b4.a_mac_addr;
}
clear_interrupt(local); /* Clear any interrupt from the card */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
index fde89866fa8d..51e32df6120b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
@@ -363,7 +363,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
- rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221);
+ rtl8187se_rf_writereg(dev, 0x02, 0x088D); msleep(221);
rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
@@ -386,7 +386,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
- rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0975); msleep(31);
rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 54c9f6ab0c8c..f4122c8fdd97 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1907,7 +1907,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes =
- (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF);
+ (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
}
EXPORT_SYMBOL(rtl_rx_ampdu_apply);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index df3facc8e5a4..6597f7cb3411 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -2818,23 +2818,11 @@ static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
@@ -2949,23 +2937,11 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3008,23 +2984,11 @@ static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3071,23 +3035,11 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3121,23 +3073,11 @@ static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3189,23 +3129,11 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3264,23 +3192,11 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
@@ -3336,23 +3252,11 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
@@ -3436,23 +3340,11 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 9935bd09db1f..51e4e92d95a0 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2480,7 +2480,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len);
if (ret == 0) {
sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
len = sizeof(rssi);
@@ -2488,7 +2488,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
&rssi, &len);
if (ret == 0) {
sinfo->signal = level_to_qual(le32_to_cpu(rssi));
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
}
@@ -2928,6 +2928,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
while (buflen >= sizeof(*auth_req)) {
auth_req = (void *)buf;
+ if (buflen < le32_to_cpu(auth_req->length))
+ return;
type = "unknown";
flags = le32_to_cpu(auth_req->flags);
pairwise_error = false;
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 0761e61591bd..01edf960ff3c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -26,6 +26,9 @@ static struct ta_metadata metadata_flash_content[] = {
{"flash_content", 0x00010000},
{"rsi/rs9113_wlan_qspi.rps", 0x00010000},
{"rsi/rs9113_wlan_bt_dual_mode.rps", 0x00010000},
+ {"flash_content", 0x00010000},
+ {"rsi/rs9113_ap_bt_dual_mode.rps", 0x00010000},
+
};
int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
@@ -54,7 +57,6 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_vif *vif;
struct rsi_mgmt_desc *mgmt_desc;
struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss = NULL;
struct rsi_xtended_desc *xtend_desc = NULL;
u8 header_size;
u32 dword_align_bytes = 0;
@@ -88,7 +90,6 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
tx_params->internal_hdr_size = header_size;
memset(&skb->data[0], 0, header_size);
- bss = &vif->bss_conf;
wh = (struct ieee80211_hdr *)&skb->data[header_size];
mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
@@ -145,7 +146,6 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_hdr *wh = NULL;
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss;
struct rsi_data_desc *data_desc;
struct rsi_xtended_desc *xtend_desc;
u8 ieee80211_size = MIN_802_11_HDR_LEN;
@@ -156,7 +156,6 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
vif = info->control.vif;
- bss = &vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
@@ -246,7 +245,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
}
}
- data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff);
+ data_desc->mac_flags |= cpu_to_le16(seq_num & 0xfff);
data_desc->qid_tid = ((skb->priority & 0xf) |
((tx_params->tid & 0xf) << 4));
data_desc->sta_id = tx_params->sta_id;
@@ -285,7 +284,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
struct ieee80211_bss_conf *bss;
- struct ieee80211_hdr *wh;
int status = -EINVAL;
u8 header_size;
@@ -301,7 +299,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
bss = &vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
header_size = tx_params->internal_hdr_size;
- wh = (struct ieee80211_hdr *)&skb->data[header_size];
if (((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
@@ -744,13 +741,11 @@ static int ping_pong_write(struct rsi_hw *adapter, u8 cmd, u8 *addr, u32 size)
static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
u32 content_size)
{
- u8 cmd, *temp_flash_content;
+ u8 cmd;
u32 temp_content_size, num_flash, index;
u32 flash_start_address;
int status;
- temp_flash_content = flash_content;
-
if (content_size > MAX_FLASH_FILE_SIZE) {
rsi_dbg(ERR_ZONE,
"%s: Flash Content size is more than 400K %u\n",
@@ -842,7 +837,6 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
const struct firmware *fw_entry = NULL;
u32 regout_val = 0, content_size;
u16 tmp_regout_val = 0;
- u8 *flash_content = NULL;
struct ta_metadata *metadata_p;
int status;
@@ -904,28 +898,22 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
__func__, metadata_p->name);
return status;
}
- flash_content = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
- if (!flash_content) {
- rsi_dbg(ERR_ZONE, "%s: Failed to copy firmware\n", __func__);
- status = -EIO;
- goto fail;
- }
content_size = fw_entry->size;
rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size);
/* Get the firmware version */
common->lmac_ver.ver.info.fw_ver[0] =
- flash_content[LMAC_VER_OFFSET] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET] & 0xFF;
common->lmac_ver.ver.info.fw_ver[1] =
- flash_content[LMAC_VER_OFFSET + 1] & 0xFF;
- common->lmac_ver.major = flash_content[LMAC_VER_OFFSET + 2] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET + 1] & 0xFF;
+ common->lmac_ver.major = fw_entry->data[LMAC_VER_OFFSET + 2] & 0xFF;
common->lmac_ver.release_num =
- flash_content[LMAC_VER_OFFSET + 3] & 0xFF;
- common->lmac_ver.minor = flash_content[LMAC_VER_OFFSET + 4] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET + 3] & 0xFF;
+ common->lmac_ver.minor = fw_entry->data[LMAC_VER_OFFSET + 4] & 0xFF;
common->lmac_ver.patch_num = 0;
rsi_print_version(common);
- status = bl_write_header(adapter, flash_content, content_size);
+ status = bl_write_header(adapter, (u8 *)fw_entry->data, content_size);
if (status) {
rsi_dbg(ERR_ZONE,
"%s: RPS Image header loading failed\n",
@@ -967,7 +955,7 @@ fw_upgrade:
rsi_dbg(INFO_ZONE, "Burn Command Pass.. Upgrading the firmware\n");
- status = auto_fw_upgrade(adapter, flash_content, content_size);
+ status = auto_fw_upgrade(adapter, (u8 *)fw_entry->data, content_size);
if (status == 0) {
rsi_dbg(ERR_ZONE, "Firmware upgradation Done\n");
goto load_image_cmd;
@@ -981,13 +969,11 @@ fw_upgrade:
success:
rsi_dbg(ERR_ZONE, "***** Firmware Loading successful *****\n");
- kfree(flash_content);
release_firmware(fw_entry);
return 0;
fail:
rsi_dbg(ERR_ZONE, "##### Firmware loading failed #####\n");
- kfree(flash_content);
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 2ca7464b7fa3..4e510cbe0a89 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -416,7 +416,8 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
/* Get free vap index */
for (i = 0; i < RSI_MAX_VIFS; i++) {
- if (!adapter->vifs[i]) {
+ if (!adapter->vifs[i] ||
+ !memcmp(vif->addr, adapter->vifs[i]->addr, ETH_ALEN)) {
vap_idx = i;
break;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 1485a0c89df2..01d99ed985ee 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -122,7 +122,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
u8 extended_desc)
{
struct ieee80211_tx_info *info;
- struct skb_info *rx_params;
struct sk_buff *skb = NULL;
u8 payload_offset;
struct ieee80211_vif *vif;
@@ -149,10 +148,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
vif = rsi_get_vif(common->priv, wh->addr1);
info = IEEE80211_SKB_CB(skb);
- rx_params = (struct skb_info *)info->driver_data;
- rx_params->rssi = rsi_get_rssi(buffer);
- rx_params->channel = rsi_get_connected_channel(vif);
-
return skb;
}
@@ -336,7 +331,6 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
spin_lock_init(&adapter->ps_lock);
timer_setup(&common->roc_timer, rsi_roc_timeout, 0);
init_completion(&common->wlan_init_completion);
- common->init_done = true;
adapter->device_model = RSI_DEV_9113;
common->oper_mode = oper_mode;
@@ -374,6 +368,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
}
#endif
+ common->init_done = true;
return adapter;
err:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index d0e5937cad6d..1095df7d9573 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -334,20 +334,17 @@ static int rsi_load_radio_caps(struct rsi_common *common)
struct ieee80211_conf *conf = &hw->conf;
if (conf_is_ht40_plus(conf)) {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_LOWER_20_ENABLE;
- radio_caps->radio_info =
- RSI_CMDDESC_LOWER_20_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16(LOWER_20_ENABLE |
+ (LOWER_20_ENABLE >> 12));
} else if (conf_is_ht40_minus(conf)) {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_UPPER_20_ENABLE;
- radio_caps->radio_info =
- RSI_CMDDESC_UPPER_20_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16(UPPER_20_ENABLE |
+ (UPPER_20_ENABLE >> 12));
} else {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_40MHZ;
- radio_caps->radio_info =
- RSI_CMDDESC_FULL_40_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16((BW_40MHZ << 12) |
+ FULL40M_ENABLE);
}
}
}
@@ -749,7 +746,7 @@ int rsi_hal_load_key(struct rsi_common *common,
key_descriptor |= RSI_CIPHER_TKIP;
}
key_descriptor |= RSI_PROTECT_DATA_FRAMES;
- key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK);
+ key_descriptor |= (key_id << RSI_KEY_ID_OFFSET);
rsi_set_len_qno(&set_key->desc_dword0.len_qno,
(frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 416981d99229..5733e440ecaf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1394,10 +1394,7 @@ static const struct dev_pm_ops rsi_pm_ops = {
#endif
static const struct sdio_device_id rsi_dev_table[] = {
- { SDIO_DEVICE(0x303, 0x100) },
- { SDIO_DEVICE(0x041B, 0x0301) },
- { SDIO_DEVICE(0x041B, 0x0201) },
- { SDIO_DEVICE(0x041B, 0x9330) },
+ { SDIO_DEVICE(RSI_SDIO_VID_9113, RSI_SDIO_PID_9113) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 6ce6b754df12..c0a163e40402 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -835,11 +835,7 @@ static int rsi_resume(struct usb_interface *intf)
#endif
static const struct usb_device_id rsi_dev_table[] = {
- { USB_DEVICE(0x0303, 0x0100) },
- { USB_DEVICE(0x041B, 0x0301) },
- { USB_DEVICE(0x041B, 0x0201) },
- { USB_DEVICE(0x041B, 0x9330) },
- { USB_DEVICE(0x1618, 0x9113) },
+ { USB_DEVICE(RSI_USB_VID_9113, RSI_USB_PID_9113) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 14620935c925..359fbdf85739 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -22,7 +22,7 @@
#include "rsi_main.h"
#define MAX_MGMT_PKT_SIZE 512
-#define RSI_NEEDED_HEADROOM 80
+#define RSI_NEEDED_HEADROOM 84
#define RSI_RCV_BUFFER_LEN 2000
#define RSI_11B_MODE 0
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 353dbdf31e75..66dcd2ec9051 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -28,6 +28,9 @@
#include <linux/mmc/sdio_ids.h>
#include "rsi_main.h"
+#define RSI_SDIO_VID_9113 0x041B
+#define RSI_SDIO_PID_9113 0x9330
+
enum sdio_interrupt_type {
BUFFER_FULL = 0x0,
BUFFER_AVAILABLE = 0x2,
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index b6fe79f0a513..5b2eddd1a2ee 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -22,6 +22,9 @@
#include "rsi_main.h"
#include "rsi_common.h"
+#define RSI_USB_VID_9113 0x1618
+#define RSI_USB_PID_9113 0x9113
+
#define USB_INTERNAL_REG_1 0x25000
#define RSI_USB_READY_MAGIC_NUM 0xab
#define FW_STATUS_REG 0x41050012
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 86ccf84ea0c6..597e934c4630 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -20,6 +20,8 @@
*
*/
+#include <linux/pm_runtime.h>
+
#include "../wlcore/debugfs.h"
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
@@ -276,15 +278,18 @@ static ssize_t radar_detection_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl18xx_cmd_radar_detection_debug(wl, channel);
if (ret < 0)
count = ret;
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -315,15 +320,18 @@ static ssize_t dynamic_fw_traces_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl18xx_acx_dynamic_fw_traces(wl);
if (ret < 0)
count = ret;
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -374,9 +382,11 @@ static ssize_t radar_debug_mode_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif_ap(wl, wlvif) {
wlcore_cmd_generic_cfg(wl, wlvif,
@@ -384,7 +394,8 @@ static ssize_t radar_debug_mode_write(struct file *file,
wl->radar_debug_mode, 0);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
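
Every wl18xx/wlcore hunk from here on applies the same mechanical conversion: the driver-private wl1271_ps_elp_wakeup()/wl1271_ps_elp_sleep() pair becomes generic runtime PM calls on wl->dev. The recurring idiom is sketched below under the assumption that runtime PM with autosuspend has already been enabled on the device (the wlcore_probe() hunk further down does that); wlcore_locked_op() is a hypothetical wrapper used only to frame the pattern.

#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include "wlcore.h"

static int wlcore_locked_op(struct wl1271 *wl)
{
	int ret;

	mutex_lock(&wl->mutex);

	/* Counterpart of the old ELP wakeup: resume the chip synchronously. */
	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		/* get_sync() raises the usage count even when resume fails. */
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = 0;	/* ... firmware commands go here ... */

	/* Counterpart of the old ELP sleep: re-arm autosuspend, drop the ref. */
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
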
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 3ca9167d6146..7c83915a7c5e 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -31,7 +31,6 @@
#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
-#include "ps.h"
#include "hw_ops.h"
int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 761cf8573a80..903968735a74 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
@@ -191,6 +192,12 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
+ goto free_vector;
+ }
+
do {
if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -222,6 +229,9 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
} while (!event);
out:
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
+free_vector:
kfree(events_vector);
return ret;
}
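
wlcore_cmd_wait_for_event_or_timeout() needs a slightly different shape than the debugfs and mac80211 callers: the events vector is allocated before the runtime PM reference is taken, so the hunk above adds a second label and jumps to it when pm_runtime_get_sync() fails, ensuring the mark_last_busy()/put_autosuspend() pair only runs when a reference is actually held. A reduced sketch of that control flow (the event polling loop and its error handling are elided; wait_for_event_sketch() is a hypothetical name):

#include <linux/pm_runtime.h>
#include <linux/slab.h>

static int wait_for_event_sketch(struct wl1271 *wl)
{
	u32 *events_vector;
	int ret;

	events_vector = kzalloc(sizeof(*events_vector), GFP_KERNEL);
	if (!events_vector)
		return -ENOMEM;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto free_vector;	/* no reference held: skip the put below */
	}

	/* ... poll the event mailbox; the loop exits through out: ... */

out:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
free_vector:
	kfree(events_vector);
	return ret;
}
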
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index a2cb408be8aa..aeb74e74698e 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -26,6 +26,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
@@ -65,9 +66,11 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (!wl->plt &&
time_after(jiffies, wl->stats.fw_stats_update +
@@ -76,7 +79,8 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
wl->stats.fw_stats_update = jiffies;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -118,14 +122,18 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value,
return;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
+
return;
+ }
chip_op = arg;
chip_op(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
}
@@ -292,9 +300,11 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* In case we're already in PSM, trigger it again to set new timeout
* immediately without waiting for re-association
@@ -305,7 +315,8 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -359,9 +370,11 @@ static ssize_t forced_ps_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* In case we're already in PSM, trigger it again to switch mode
* immediately without waiting for re-association
@@ -374,7 +387,8 @@ static ssize_t forced_ps_write(struct file *file,
wl1271_ps_set_mode(wl, wlvif, ps_mode);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -838,15 +852,18 @@ static ssize_t rx_streaming_interval_write(struct file *file,
wl->conf.rx_streaming.interval = value;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -893,15 +910,18 @@ static ssize_t rx_streaming_always_write(struct file *file,
wl->conf.rx_streaming.always = value;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -940,15 +960,18 @@ static ssize_t beacon_filtering_write(struct file *file,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -1019,16 +1042,19 @@ static ssize_t sleep_auth_write(struct file *file,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_acx_sleep_auth(wl, value);
if (ret < 0)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -1083,7 +1109,7 @@ static ssize_t dev_mem_read(struct file *file,
* Don't fail if elp_wakeup returns an error, so the device's memory
* could be read even if the FW crashed
*/
- wl1271_ps_elp_wakeup(wl);
+ pm_runtime_get_sync(wl->dev);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1102,7 +1128,8 @@ read_err:
goto part_err;
part_err:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
skip_read:
mutex_unlock(&wl->mutex);
@@ -1164,7 +1191,7 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
* Don't fail if elp_wakeup returns an error, so the device's memory
* could be read even if the FW crashed
*/
- wl1271_ps_elp_wakeup(wl);
+ pm_runtime_get_sync(wl->dev);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1183,7 +1210,8 @@ write_err:
goto part_err;
part_err:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
skip_write:
mutex_unlock(&wl->mutex);
@@ -1247,8 +1275,9 @@ static ssize_t fw_logger_write(struct file *file,
}
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = pm_runtime_get_sync(wl->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
count = ret;
goto out;
}
@@ -1257,7 +1286,8 @@ static ssize_t fw_logger_write(struct file *file,
ret = wl12xx_cmd_config_fwlog(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 3a51ab116e79..89b0d0fade9f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
@@ -43,6 +44,7 @@
#define WL1271_BOOT_RETRIES 3
#define WL1271_SUSPEND_SLEEP 100
+#define WL1271_WAKEUP_TIMEOUT 500
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
@@ -153,9 +155,11 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
if (!wl->conf.rx_streaming.interval)
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
@@ -166,7 +170,8 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -183,16 +188,19 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work)
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -229,9 +237,11 @@ static void wlcore_rc_update_work(struct work_struct *work)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (ieee80211_vif_is_mesh(vif)) {
ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
@@ -243,7 +253,8 @@ static void wlcore_rc_update_work(struct work_struct *work)
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -539,15 +550,16 @@ static int wlcore_irq_locked(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
while (!done && loopcount--) {
/*
* In order to avoid a race with the hardirq, clear the flag
- * before acknowledging the chip. Since the mutex is held,
- * wl1271_ps_elp_wakeup cannot be called concurrently.
+ * before acknowledging the chip.
*/
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_atomic();
@@ -641,7 +653,8 @@ static int wlcore_irq_locked(struct wl1271 *wl)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
return ret;
@@ -796,8 +809,6 @@ void wl12xx_queue_recovery_work(struct wl1271 *wl)
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
- wl1271_ps_elp_wakeup(wl);
- wlcore_disable_interrupts_nosync(wl);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
}
@@ -819,6 +830,7 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
u32 end_of_log = 0;
+ int error;
if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
return;
@@ -830,8 +842,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
* Do not send a stop fwlog command if the fw is hanged or if
* dbgpins are used (due to some fw bug).
*/
- if (wl1271_ps_elp_wakeup(wl))
+ error = pm_runtime_get_sync(wl->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(wl->dev);
return;
+ }
if (!wl->watchdog_recovery &&
wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
wl12xx_cmd_stop_fwlog(wl);
@@ -919,12 +934,20 @@ static void wl1271_recovery_work(struct work_struct *work)
container_of(work, struct wl1271, recovery_work);
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
+ int error;
mutex_lock(&wl->mutex);
if (wl->state == WLCORE_STATE_OFF || wl->plt)
goto out_unlock;
+ error = pm_runtime_get_sync(wl->dev);
+ if (error < 0) {
+ wl1271_warning("Enable for recovery failed");
+ pm_runtime_put_noidle(wl->dev);
+ }
+ wlcore_disable_interrupts_nosync(wl);
+
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
wl12xx_read_fwlog_panic(wl);
@@ -958,6 +981,8 @@ static void wl1271_recovery_work(struct work_struct *work)
}
wlcore_op_stop_locked(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
ieee80211_restart_hw(wl->hw);
@@ -978,24 +1003,6 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
-static int wlcore_fw_sleep(struct wl1271 *wl)
-{
- int ret;
-
- mutex_lock(&wl->mutex);
- ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
- goto out;
- }
- set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-out:
- mutex_unlock(&wl->mutex);
- mdelay(WL1271_SUSPEND_SLEEP);
-
- return 0;
-}
-
static int wl1271_setup(struct wl1271 *wl)
{
wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
@@ -1184,7 +1191,6 @@ int wl1271_plt_stop(struct wl1271 *wl)
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
- cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
mutex_lock(&wl->mutex);
@@ -1719,6 +1725,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
+ unsigned long flags;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
@@ -1734,8 +1741,9 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = pm_runtime_get_sync(wl->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
mutex_unlock(&wl->mutex);
return ret;
}
@@ -1765,6 +1773,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
goto out_sleep;
out_sleep:
+ pm_runtime_put_noidle(wl->dev);
mutex_unlock(&wl->mutex);
if (ret < 0) {
@@ -1775,21 +1784,7 @@ out_sleep:
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
- /*
- * disable and re-enable interrupts in order to flush
- * the threaded_irq
- */
- wlcore_disable_interrupts(wl);
-
- /*
- * set suspended flag to avoid triggering a new threaded_irq
- * work. no need for spinlock as interrupts are disabled.
- */
- set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
-
- wlcore_enable_interrupts(wl);
flush_work(&wl->tx_work);
- flush_delayed_work(&wl->elp_work);
/*
* Cancel the watchdog even if above tx_flush failed. We will detect
@@ -1798,15 +1793,14 @@ out_sleep:
cancel_delayed_work(&wl->tx_watchdog_work);
/*
- * Use an immediate call for allowing the firmware to go into power
- * save during suspend.
- * Using a workque for this last write was only hapenning on resume
- * leaving the firmware with power save disabled during suspend,
- * while consuming full power during wowlan suspend.
+ * set suspended flag to avoid triggering a new threaded_irq
+ * work.
*/
- wlcore_fw_sleep(wl);
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
- return 0;
+ return pm_runtime_force_suspend(wl->dev);
}
static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
@@ -1821,6 +1815,12 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
wl->wow_enabled);
WARN_ON(!wl->wow_enabled);
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+ goto out_sleep;
+ }
+
/*
* re-enable irq_work enqueuing, and call irq_work directly if
* there is a pending work.
@@ -1857,9 +1857,11 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
goto out_sleep;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -1878,7 +1880,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
wl->wow_enabled = false;
@@ -1945,7 +1948,6 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
- cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
/* let's notify MAC80211 about the remaining pending TX frames */
@@ -2060,13 +2062,16 @@ static void wlcore_channel_switch_work(struct work_struct *work)
vif = wl12xx_wlvif_to_vif(wlvif);
ieee80211_chswitch_done(vif, false);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_cmd_stop_channel_switch(wl, wlvif);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -2128,14 +2133,17 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work)
if (!time_after(time_spare, wlvif->pending_auth_reply_time))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* cancel the ROC if active */
wlcore_update_inconn_sta(wl, wlvif, NULL, false);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -2537,9 +2545,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
wl12xx_get_vif_count(hw, vif, &vif_count);
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out_unlock;
/*
* in some very corner case HW recovery scenarios its possible to
@@ -2568,14 +2573,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl12xx_need_fw_change(wl, vif_count, true)) {
- wl12xx_force_active_psm(wl);
- set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
- mutex_unlock(&wl->mutex);
- wl1271_recovery_work(&wl->recovery_work);
- return 0;
- }
-
/*
* TODO: after the nvs issue will be solved, move this block
* to start(), and make sure here the driver is ON.
@@ -2592,6 +2589,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
+ /*
+ * Call runtime PM only after possible wl12xx_init_fw() above
+ * is done. Otherwise we do not have interrupts enabled.
+ */
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
+ goto out_unlock;
+ }
+
+ if (wl12xx_need_fw_change(wl, vif_count, true)) {
+ wl12xx_force_active_psm(wl);
+ set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+ mutex_unlock(&wl->mutex);
+ wl1271_recovery_work(&wl->recovery_work);
+ return 0;
+ }
+
if (!wlcore_is_p2p_mgmt(wlvif)) {
ret = wl12xx_cmd_role_enable(wl, vif->addr,
role_type, &wlvif->role_id);
@@ -2622,7 +2637,8 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
else
wl->sta_count++;
out:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out_unlock:
mutex_unlock(&wl->mutex);
@@ -2677,9 +2693,11 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
/* disable active roles */
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto deinit;
+ }
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
@@ -2697,7 +2715,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
goto deinit;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
}
deinit:
wl12xx_tx_reset_wlvif(wl, wlvif);
@@ -3121,9 +3140,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* configure each interface */
wl12xx_for_each_wlvif(wl, wlvif) {
@@ -3133,7 +3154,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3202,9 +3224,11 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -3247,7 +3271,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
*/
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3454,13 +3479,16 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto out_wake_queues;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out_wake_queues;
+ }
ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out_wake_queues:
if (might_change_spare)
@@ -3600,9 +3628,11 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
goto out_unlock;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out_unlock;
+ }
wlvif->default_key = key_idx;
@@ -3616,7 +3646,8 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out_unlock:
mutex_unlock(&wl->mutex);
@@ -3634,7 +3665,7 @@ void wlcore_regdomain_config(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = pm_runtime_get_sync(wl->dev);
if (ret < 0)
goto out;
@@ -3644,7 +3675,8 @@ void wlcore_regdomain_config(struct wl1271 *wl)
goto out;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -3678,9 +3710,11 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* fail if there is any role in ROC */
if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
@@ -3691,7 +3725,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3718,9 +3753,11 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
ret = wl->ops->scan_stop(wl, wlvif);
@@ -3741,7 +3778,8 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
ieee80211_scan_completed(wl->hw, &info);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3766,9 +3804,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
if (ret < 0)
@@ -3777,7 +3817,8 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
wl->sched_vif = wlvif;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
@@ -3797,13 +3838,16 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl->ops->sched_scan_stop(wl, wlvif);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3822,15 +3866,18 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_acx_frag_threshold(wl, value);
if (ret < 0)
wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3851,16 +3898,19 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_rts_threshold(wl, wlvif, value);
if (ret < 0)
wl1271_warning("set rts threshold failed: %d", ret);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4607,9 +4657,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if ((changed & BSS_CHANGED_TXPOWER) &&
bss_conf->txpower != wlvif->power_level) {
@@ -4626,7 +4678,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
else
wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4665,9 +4718,11 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
@@ -4690,7 +4745,8 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
}
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -4719,9 +4775,11 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wlvif->band = ctx->def.chan->band;
wlvif->channel = channel;
@@ -4737,7 +4795,8 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
wlvif->radar_enabled = true;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4768,9 +4827,11 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (wlvif->radar_enabled) {
wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
@@ -4778,7 +4839,8 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
wlvif->radar_enabled = false;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -4835,9 +4897,11 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
for (i = 0; i < n_vifs; i++) {
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
@@ -4847,7 +4911,8 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
goto out_sleep;
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4878,9 +4943,11 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/*
* the txop is confed in units of 32us by the mac80211,
@@ -4899,7 +4966,8 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
0, 0);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4923,16 +4991,19 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
if (ret < 0)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5238,13 +5309,16 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
if (new_state < old_state)
@@ -5293,9 +5367,11 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
ba_bitmap = &wl->links[hlid].ba_bitmap;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
tid, action);
@@ -5368,7 +5444,8 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5402,16 +5479,19 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl1271_set_band_rate(wl, wlvif);
wlvif->basic_rate =
wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
}
out:
mutex_unlock(&wl->mutex);
@@ -5441,9 +5521,11 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* TODO: change mac80211 to pass vif as param */
@@ -5465,7 +5547,8 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5532,9 +5615,11 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
if (ret)
@@ -5543,7 +5628,8 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -5584,9 +5670,11 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
if (ret < 0)
@@ -5596,7 +5684,8 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
msecs_to_jiffies(duration));
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
@@ -5638,13 +5727,16 @@ static int wlcore_roc_completed(struct wl1271 *wl)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = __wlcore_roc_completed(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5719,19 +5811,22 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out_sleep;
+ }
ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
if (ret < 0)
goto out_sleep;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi_dbm;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -6065,16 +6160,16 @@ static int wl1271_register_hw(struct wl1271 *wl)
}
if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
- wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n");
+ wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
if (!strcmp(pdev_data->family->name, "wl18xx")) {
- wl1271_warning("This default nvs file can be removed from the file system\n");
+ wl1271_warning("This default nvs file can be removed from the file system");
} else {
- wl1271_warning("Your device performance is not optimized.\n");
- wl1271_warning("Please use the calibrator tool to configure your device.\n");
+ wl1271_warning("Your device performance is not optimized.");
+ wl1271_warning("Please use the calibrator tool to configure your device.");
}
if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
- wl1271_warning("Fuse mac address is zero. using random mac\n");
+ wl1271_warning("Fuse mac address is zero. using random mac");
/* Use TI oui and a random nic */
oui_addr = WLCORE_TI_OUI_ADDRESS;
nic_addr = get_random_int();
@@ -6300,7 +6395,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
- INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
@@ -6575,6 +6669,99 @@ out:
complete_all(&wl->nvs_loading_complete);
}
+static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ struct wl12xx_vif *wlvif;
+ int error;
+
+ /* We do not enter elp sleep in PLT mode */
+ if (wl->plt)
+ return 0;
+
+ /* Nothing to do if no ELP mode requested */
+ if (wl->sleep_auth != WL1271_PSM_ELP)
+ return 0;
+
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ return -EBUSY;
+ }
+
+ wl1271_debug(DEBUG_PSM, "chip to elp");
+ error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+ if (error < 0) {
+ wl12xx_queue_recovery_work(wl);
+
+ return error;
+ }
+
+ set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+
+ return 0;
+}
+
+static int __maybe_unused wlcore_runtime_resume(struct device *dev)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ DECLARE_COMPLETION_ONSTACK(compl);
+ unsigned long flags;
+ int ret;
+ unsigned long start_time = jiffies;
+ bool pending = false;
+
+ /* Nothing to do if no ELP mode requested */
+ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
+ return 0;
+
+ wl1271_debug(DEBUG_PSM, "waking up chip from elp");
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+ pending = true;
+ else
+ wl->elp_compl = &compl;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto err;
+ }
+
+ if (!pending) {
+ ret = wait_for_completion_timeout(&compl,
+ msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
+ if (ret == 0) {
+ wl1271_error("ELP wakeup timeout!");
+ wl12xx_queue_recovery_work(wl);
+
+ /* Return no error for runtime PM for recovery */
+ return 0;
+ }
+ }
+
+ clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+
+ wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
+ jiffies_to_msecs(jiffies - start_time));
+
+ return 0;
+
+err:
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->elp_compl = NULL;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ return ret;
+}
+
+static const struct dev_pm_ops wlcore_pm_ops = {
+ SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
+ wlcore_runtime_resume,
+ NULL)
+};
+
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
@@ -6602,6 +6789,11 @@ int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
wlcore_nvs_cb(NULL, wl);
}
+ wl->dev->driver->pm = &wlcore_pm_ops;
+ pm_runtime_set_autosuspend_delay(wl->dev, 50);
+ pm_runtime_use_autosuspend(wl->dev);
+ pm_runtime_enable(wl->dev);
+
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
@@ -6610,6 +6802,13 @@ int wlcore_remove(struct platform_device *pdev)
{
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl1271 *wl = platform_get_drvdata(pdev);
+ int error;
+
+ error = pm_runtime_get_sync(wl->dev);
+ if (error < 0)
+ dev_warn(wl->dev, "PM runtime failed: %i\n", error);
+
+ wl->dev->driver->pm = NULL;
if (pdev_data->family && pdev_data->family->nvs_name)
wait_for_completion(&wl->nvs_loading_complete);
@@ -6621,6 +6820,11 @@ int wlcore_remove(struct platform_device *pdev)
disable_irq_wake(wl->irq);
}
wl1271_unregister_hw(wl);
+
+ pm_runtime_put_sync(wl->dev);
+ pm_runtime_dont_use_autosuspend(wl->dev);
+ pm_runtime_disable(wl->dev);
+
free_irq(wl->irq, wl);
wlcore_free_hw(wl);
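
Taken together, the main.c hunks above replace the old elp_work machinery with dev_pm_ops: wlcore_runtime_suspend() writes ELPCTRL_SLEEP once every interface is in power save, wlcore_runtime_resume() writes ELPCTRL_WAKE_UP and waits up to WL1271_WAKEUP_TIMEOUT for the firmware's wakeup completion, and wlcore_probe() registers the ops and enables autosuspend with a 50 ms delay. A condensed sketch of the registration half; the two callbacks are reduced to stubs here because their real bodies appear in the hunk above, and wlcore_enable_runtime_pm() is a hypothetical helper:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int wlcore_runtime_suspend(struct device *dev)
{
	return 0;	/* stub: real ELP entry logic is in the hunk above */
}

static int wlcore_runtime_resume(struct device *dev)
{
	return 0;	/* stub: real ELP wakeup logic is in the hunk above */
}

static const struct dev_pm_ops wlcore_pm_ops = {
	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
			   wlcore_runtime_resume,
			   NULL)
};

static void wlcore_enable_runtime_pm(struct device *dev)
{
	/* Let the chip drop back into ELP 50 ms after the last user. */
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}
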
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index b36133b739cb..9de843d1984b 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -26,152 +26,6 @@
#include "tx.h"
#include "debug.h"
-#define WL1271_WAKEUP_TIMEOUT 500
-
-#define ELP_ENTRY_DELAY 30
-#define ELP_ENTRY_DELAY_FORCE_PS 5
-
-void wl1271_elp_work(struct work_struct *work)
-{
- struct delayed_work *dwork;
- struct wl1271 *wl;
- struct wl12xx_vif *wlvif;
- int ret;
-
- dwork = to_delayed_work(work);
- wl = container_of(dwork, struct wl1271, elp_work);
-
- wl1271_debug(DEBUG_PSM, "elp work");
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state != WLCORE_STATE_ON))
- goto out;
-
- /* our work might have been already cancelled */
- if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
- goto out;
-
- if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
- goto out;
-
- wl12xx_for_each_wlvif(wl, wlvif) {
- if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
- test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
- goto out;
- }
-
- wl1271_debug(DEBUG_PSM, "chip to elp");
- ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
- goto out;
- }
-
- set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-
-out:
- mutex_unlock(&wl->mutex);
-}
-
-/* Routines to toggle sleep mode while in ELP */
-void wl1271_ps_elp_sleep(struct wl1271 *wl)
-{
- struct wl12xx_vif *wlvif;
- u32 timeout;
-
- /* We do not enter elp sleep in PLT mode */
- if (wl->plt)
- return;
-
- if (wl->sleep_auth != WL1271_PSM_ELP)
- return;
-
- /* we shouldn't get consecutive sleep requests */
- if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
- return;
-
- wl12xx_for_each_wlvif(wl, wlvif) {
- if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
- test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
- return;
- }
-
- timeout = wl->conf.conn.forced_ps ?
- ELP_ENTRY_DELAY_FORCE_PS : ELP_ENTRY_DELAY;
- ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
- msecs_to_jiffies(timeout));
-}
-EXPORT_SYMBOL_GPL(wl1271_ps_elp_sleep);
-
-int wl1271_ps_elp_wakeup(struct wl1271 *wl)
-{
- DECLARE_COMPLETION_ONSTACK(compl);
- unsigned long flags;
- int ret;
- unsigned long start_time = jiffies;
- bool pending = false;
-
- /*
- * we might try to wake up even if we didn't go to sleep
- * before (e.g. on boot)
- */
- if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
- return 0;
-
- /* don't cancel_sync as it might contend for a mutex and deadlock */
- cancel_delayed_work(&wl->elp_work);
-
- if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
- return 0;
-
- wl1271_debug(DEBUG_PSM, "waking up chip from elp");
-
- /*
- * The spinlock is required here to synchronize both the work and
- * the completion variable in one entity.
- */
- spin_lock_irqsave(&wl->wl_lock, flags);
- if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
- pending = true;
- else
- wl->elp_compl = &compl;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
- goto err;
- }
-
- if (!pending) {
- ret = wait_for_completion_timeout(
- &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
- if (ret == 0) {
- wl1271_error("ELP wakeup timeout!");
- wl12xx_queue_recovery_work(wl);
- ret = -ETIMEDOUT;
- goto err;
- }
- }
-
- clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-
- wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
- jiffies_to_msecs(jiffies - start_time));
- goto out;
-
-err:
- spin_lock_irqsave(&wl->wl_lock, flags);
- wl->elp_compl = NULL;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- return ret;
-
-out:
- return 0;
-}
-EXPORT_SYMBOL_GPL(wl1271_ps_elp_wakeup);
-
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum wl1271_cmd_ps_mode mode)
{
diff --git a/drivers/net/wireless/ti/wlcore/ps.h b/drivers/net/wireless/ti/wlcore/ps.h
index de4f9da8ed26..411727587f95 100644
--- a/drivers/net/wireless/ti/wlcore/ps.h
+++ b/drivers/net/wireless/ti/wlcore/ps.h
@@ -29,9 +29,6 @@
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum wl1271_cmd_ps_mode mode);
-void wl1271_ps_elp_sleep(struct wl1271 *wl);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl);
-void wl1271_elp_work(struct work_struct *work);
void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid, bool clean_queues);
void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 0f15696195f8..078a4940bc5c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
- u8 beacon)
+ u8 beacon, u8 probe_rsp)
{
memset(status, 0, sizeof(struct ieee80211_rx_status));
@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
}
+ if (beacon || probe_rsp)
+ status->boottime_ns = ktime_get_boot_ns();
+
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
status->band);
@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
if (ieee80211_is_data_present(hdr->frame_control))
is_data = 1;
- wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+ wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
+ ieee80211_is_probe_resp(hdr->frame_control));
wlcore_hw_set_rx_csum(wl, desc, skb);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
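
The rx.c hunk stamps beacons and probe responses with the boot-time clock so mac80211 can report how fresh each scan result is. A minimal sketch of just that step, assuming the caller has already classified the frame (fill_scan_timestamp() is a hypothetical helper name):

#include <linux/ktime.h>
#include <net/mac80211.h>

static void fill_scan_timestamp(struct ieee80211_rx_status *status,
				bool beacon, bool probe_rsp)
{
	/* Only frames that feed scan results need the boot-time stamp. */
	if (beacon || probe_rsp)
		status->boottime_ns = ktime_get_boot_ns();
}
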
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 5612f5916b4e..764e723e4ef9 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -22,13 +22,13 @@
*/
#include <linux/ieee80211.h>
+#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
#include "cmd.h"
#include "scan.h"
#include "acx.h"
-#include "ps.h"
#include "tx.h"
void wl1271_scan_complete_work(struct work_struct *work)
@@ -67,17 +67,17 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl->scan.req = NULL;
wl->scan_wlvif = NULL;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
}
- wl1271_ps_elp_sleep(wl);
-
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
wl12xx_queue_recovery_work(wl);
@@ -85,6 +85,9 @@ void wl1271_scan_complete_work(struct work_struct *work)
wlcore_cmd_regdomain_config_locked(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
+
ieee80211_scan_completed(wl->hw, &info);
out:
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index d31eb775e023..7425ba9471d0 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -19,9 +19,11 @@
*
*/
+#include <linux/pm_runtime.h>
+
+#include "acx.h"
#include "wlcore.h"
#include "debug.h"
-#include "ps.h"
#include "sysfs.h"
static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
@@ -68,12 +70,15 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl1271_acx_sg_enable(wl, wl->sg_enabled);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 009ec07c4cec..dcb2c8b0feb6 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -22,13 +22,13 @@
*/
#include "testmode.h"
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <net/genetlink.h>
#include "wlcore.h"
#include "debug.h"
#include "acx.h"
-#include "ps.h"
#include "io.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -97,9 +97,11 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
@@ -141,7 +143,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -169,9 +172,11 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -205,7 +210,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
out_free:
kfree(cmd);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 00e9b4624dcf..b6e19c2d66b0 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include "wlcore.h"
@@ -868,9 +869,11 @@ void wl1271_tx_work(struct work_struct *work)
int ret;
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_tx_work_locked(wl);
if (ret < 0) {
@@ -878,7 +881,8 @@ void wl1271_tx_work(struct work_struct *work)
goto out;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index 5c0bcb1fe1a1..dbe78d8491ef 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -8,12 +8,13 @@
* version 2 as published by the Free Software Foundation.
*/
+#include <linux/pm_runtime.h>
+
#include <net/mac80211.h>
#include <net/netlink.h>
#include "wlcore.h"
#include "debug.h"
-#include "ps.h"
#include "hw_ops.h"
#include "vendor_cmd.h"
@@ -55,14 +56,17 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_smart_config_start(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -87,13 +91,16 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_smart_config_stop(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -131,16 +138,19 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_smart_config_set_group_key(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),
nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]),
nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]));
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 95fbedc8ea34..d4b1f66ef457 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -348,7 +348,6 @@ struct wl1271 {
enum nl80211_band band;
struct completion *elp_compl;
- struct delayed_work elp_work;
/* in dBm */
int power_level;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e840985385fc..32ec121ccac2 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -233,7 +233,6 @@ enum wl12xx_flags {
WL1271_FLAG_TX_QUEUE_STOPPED,
WL1271_FLAG_TX_PENDING,
WL1271_FLAG_IN_ELP,
- WL1271_FLAG_ELP_REQUESTED,
WL1271_FLAG_IRQ_RUNNING,
WL1271_FLAG_FW_TX_BUSY,
WL1271_FLAG_DUMMY_PACKET_PENDING,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
index 07b94eda9604..dd6a86b899eb 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
@@ -1341,7 +1341,7 @@ int zd_chip_control_leds(struct zd_chip *chip, enum led_status status)
case ZD_LED_SCANNING:
ioreqs[0].value = FW_LINK_OFF;
ioreqs[1].value = v[1] & ~other_led;
- if (get_seconds() % 3 == 0) {
+ if ((u32)ktime_get_seconds() % 3 == 0) {
ioreqs[1].value &= ~chip->link_led;
} else {
ioreqs[1].value |= chip->link_led;
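
The zd_chip.c hunk swaps get_seconds() for ktime_get_seconds(), the 64-bit, y2038-safe accessor; the cast back to u32 keeps the existing modulo-3 LED blink arithmetic intact. A one-function sketch of the pattern (blink_phase_on() is a hypothetical name):

#include <linux/timekeeping.h>
#include <linux/types.h>

static bool blink_phase_on(void)
{
	/* time64_t seconds, truncated only for the cheap modulo test */
	return ((u32)ktime_get_seconds() % 3) != 0;
}
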
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index c30bf118c67d..c2cda3acd4af 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -371,25 +371,27 @@ static inline void handle_regs_int_override(struct urb *urb)
{
struct zd_usb *usb = urb->context;
struct zd_usb_interrupt *intr = &usb->intr;
+ unsigned long flags;
- spin_lock(&intr->lock);
+ spin_lock_irqsave(&intr->lock, flags);
if (atomic_read(&intr->read_regs_enabled)) {
atomic_set(&intr->read_regs_enabled, 0);
intr->read_regs_int_overridden = 1;
complete(&intr->read_regs.completion);
}
- spin_unlock(&intr->lock);
+ spin_unlock_irqrestore(&intr->lock, flags);
}
static inline void handle_regs_int(struct urb *urb)
{
struct zd_usb *usb = urb->context;
struct zd_usb_interrupt *intr = &usb->intr;
+ unsigned long flags;
int len;
u16 int_num;
ZD_ASSERT(in_interrupt());
- spin_lock(&intr->lock);
+ spin_lock_irqsave(&intr->lock, flags);
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
if (int_num == CR_INTERRUPT) {
@@ -425,7 +427,7 @@ static inline void handle_regs_int(struct urb *urb)
}
out:
- spin_unlock(&intr->lock);
+ spin_unlock_irqrestore(&intr->lock, flags);
/* CR_INTERRUPT might override read_reg too. */
if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled))
@@ -665,6 +667,7 @@ static void rx_urb_complete(struct urb *urb)
struct zd_usb_rx *rx;
const u8 *buffer;
unsigned int length;
+ unsigned long flags;
switch (urb->status) {
case 0:
@@ -693,14 +696,14 @@ static void rx_urb_complete(struct urb *urb)
/* If there is an old first fragment, we don't care. */
dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment));
- spin_lock(&rx->lock);
+ spin_lock_irqsave(&rx->lock, flags);
memcpy(rx->fragment, buffer, length);
rx->fragment_length = length;
- spin_unlock(&rx->lock);
+ spin_unlock_irqrestore(&rx->lock, flags);
goto resubmit;
}
- spin_lock(&rx->lock);
+ spin_lock_irqsave(&rx->lock, flags);
if (rx->fragment_length > 0) {
/* We are on a second fragment, we believe */
ZD_ASSERT(length + rx->fragment_length <=
@@ -710,9 +713,9 @@ static void rx_urb_complete(struct urb *urb)
handle_rx_packet(usb, rx->fragment,
rx->fragment_length + length);
rx->fragment_length = 0;
- spin_unlock(&rx->lock);
+ spin_unlock_irqrestore(&rx->lock, flags);
} else {
- spin_unlock(&rx->lock);
+ spin_unlock_irqrestore(&rx->lock, flags);
handle_rx_packet(usb, buffer, length);
}